diff --git a/BUILD_WIN.md b/BUILD_WIN.md
index 3222e75c66..5836d5bfb5 100644
--- a/BUILD_WIN.md
+++ b/BUILD_WIN.md
@@ -9,15 +9,17 @@ Note: The prerequisites will require about 10 GB of space on your drive. You wil
If you don’t have Community or Professional edition of Visual Studio 2017, download [Visual Studio Community 2017](https://www.visualstudio.com/downloads/).
-When selecting components, check "Desktop development with C++." Also check "Windows 8.1 SDK and UCRT SDK" and "VC++ 2015.3 v140 toolset (x86,x64)" on the Summary toolbar on the right.
+When selecting components, check "Desktop development with C++." Also, on the Summary toolbar on the right, check "Windows 8.1 SDK and UCRT SDK" and "VC++ 2015.3 v140 toolset (x86,x64)".
### Step 2. Installing CMake
-Download and install the latest version of CMake 3.9. Download the file named win64-x64 Installer from the [CMake Website](https://cmake.org/download/). Make sure to check "Add CMake to system PATH for all users" when prompted during installation.
+Download and install the latest version of CMake 3.9.
+
+Download the file named win64-x64 Installer from the [CMake Website](https://cmake.org/download/). You can access the installer on this [3.9 Version page](https://cmake.org/files/v3.9/). During installation, make sure to check "Add CMake to system PATH for all users" when prompted.
### Step 3. Installing Qt
-Download and install the [Qt Online Installer](https://www.qt.io/download-open-source/?hsCtaTracking=f977210e-de67-475f-a32b-65cec207fd03%7Cd62710cd-e1db-46aa-8d4d-2f1c1ffdacea). While installing, you only need to have the following components checked under Qt 5.10.1: "msvc2017 64-bit", "Qt WebEngine", and "Qt Script (Deprecated)".
+Download and install the [Qt Open Source Online Installer](https://www.qt.io/download-open-source/?hsCtaTracking=f977210e-de67-475f-a32b-65cec207fd03%7Cd62710cd-e1db-46aa-8d4d-2f1c1ffdacea). While installing, you only need to have the following components checked under Qt 5.10.1: "msvc2017 64-bit", "Qt WebEngine", and "Qt Script (Deprecated)".
Note: Installing the Sources is optional but recommended if you have room for them (~2GB).
@@ -56,9 +58,9 @@ Where `%HIFI_DIR%` is the directory for the highfidelity repository.
Open `%HIFI_DIR%\build\hifi.sln` using Visual Studio.
-Change the Solution Configuration (next to the green play button) from "Debug" to "Release" for best performance.
+Change the Solution Configuration (menu ribbon under the menu bar, next to the green play button) from "Debug" to "Release" for best performance.
-Run `Build > Build Solution`.
+From the menu bar, run `Build > Build Solution`.
### Step 9. Testing Interface
@@ -66,7 +68,7 @@ Create another environment variable (see Step #4)
* Set "Variable name": `_NO_DEBUG_HEAP`
* Set "Variable value": `1`
-In Visual Studio, right+click "interface" under the Apps folder in Solution Explorer and select "Set as Startup Project". Run `Debug > Start Debugging`.
+In Visual Studio, right-click "interface" under the Apps folder in Solution Explorer and select "Set as Startup Project". From the menu bar, run `Debug > Start Debugging`.
Now, you should have a full build of High Fidelity and be able to run the Interface using Visual Studio. Please check our [Docs](https://wiki.highfidelity.com/wiki/Main_Page) for more information regarding the programming workflow.
diff --git a/android/app/build.gradle b/android/app/build.gradle
index 70f7c622a0..46de9642d9 100644
--- a/android/app/build.gradle
+++ b/android/app/build.gradle
@@ -27,6 +27,14 @@ android {
'-DDISABLE_KTX_CACHE=OFF'
}
}
+ signingConfigs {
+ release {
+ storeFile project.hasProperty("HIFI_ANDROID_KEYSTORE") ? file(HIFI_ANDROID_KEYSTORE) : null
+ storePassword project.hasProperty("HIFI_ANDROID_KEYSTORE_PASSWORD") ? HIFI_ANDROID_KEYSTORE_PASSWORD : ''
+ keyAlias project.hasProperty("HIFI_ANDROID_KEY_ALIAS") ? HIFI_ANDROID_KEY_ALIAS : ''
+ keyPassword project.hasProperty("HIFI_ANDROID_KEY_PASSWORD") ? HIFI_ANDROID_KEY_PASSWORD : ''
+ }
+ }
}
compileOptions {
@@ -38,6 +46,10 @@ android {
release {
minifyEnabled false
proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
+ signingConfig project.hasProperty("HIFI_ANDROID_KEYSTORE") &&
+ project.hasProperty("HIFI_ANDROID_KEYSTORE_PASSWORD") &&
+ project.hasProperty("HIFI_ANDROID_KEY_ALIAS") &&
+ project.hasProperty("HIFI_ANDROID_KEY_PASSWORD")? signingConfigs.release : null
}
}
diff --git a/android/app/src/main/AndroidManifest.xml b/android/app/src/main/AndroidManifest.xml
index e105f5bccf..0b52046057 100644
--- a/android/app/src/main/AndroidManifest.xml
+++ b/android/app/src/main/AndroidManifest.xml
@@ -49,12 +49,6 @@
android:label="@string/app_name"
android:launchMode="singleTop"
>
-
-
-
-
-
-
diff --git a/cmake/externals/serverless-content/CMakeLists.txt b/cmake/externals/serverless-content/CMakeLists.txt
index cad6d40b49..aa1c59a86b 100644
--- a/cmake/externals/serverless-content/CMakeLists.txt
+++ b/cmake/externals/serverless-content/CMakeLists.txt
@@ -4,8 +4,8 @@ set(EXTERNAL_NAME serverless-content)
ExternalProject_Add(
${EXTERNAL_NAME}
- URL http://cdn.highfidelity.com/content-sets/serverless-tutorial-RC67-v4.zip
- URL_MD5 ba32aed18bfeaac4ccaf5ebb8ea3e804
+ URL http://cdn.highfidelity.com/content-sets/serverless-tutorial-RC68.zip
+ URL_MD5 a068f74d4045e257cfa7926fe6e38ad5
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
diff --git a/cmake/macros/SetupHifiTestCase.cmake b/cmake/macros/SetupHifiTestCase.cmake
index b0edb41e36..017a0222f5 100644
--- a/cmake/macros/SetupHifiTestCase.cmake
+++ b/cmake/macros/SetupHifiTestCase.cmake
@@ -61,16 +61,21 @@ macro(SETUP_HIFI_TESTCASE)
endif()
endforeach()
+
# Find test classes to build into test executables.
# Warn about any .cpp files that are *not* test classes (*Test[s].cpp), since those files will not be used.
foreach (SRC_FILE ${TEST_PROJ_SRC_FILES})
string(REGEX MATCH ".+Tests?\\.cpp$" TEST_CPP_FILE ${SRC_FILE})
string(REGEX MATCH ".+\\.cpp$" NON_TEST_CPP_FILE ${SRC_FILE})
+ string(REGEX MATCH ".+\\.qrc$" QRC_FILE ${SRC_FILE})
if (TEST_CPP_FILE)
list(APPEND TEST_CASE_FILES ${TEST_CPP_FILE})
elseif (NON_TEST_CPP_FILE)
message(WARNING "ignoring .cpp file (not a test class -- this will not be linked or compiled!): " ${NON_TEST_CPP_FILE})
endif ()
+ if (QRC_FILE)
+ list(APPEND EXTRA_FILES ${QRC_FILE})
+ endif()
endforeach ()
if (TEST_CASE_FILES)
@@ -88,7 +93,7 @@ macro(SETUP_HIFI_TESTCASE)
# grab the implementation and header files
set(TARGET_SRCS ${TEST_FILE}) # only one source / .cpp file (the test class)
- add_executable(${TARGET_NAME} ${TEST_FILE})
+ add_executable(${TARGET_NAME} ${TEST_FILE} ${EXTRA_FILES})
add_test(${TARGET_NAME}-test ${TARGET_NAME})
set_target_properties(${TARGET_NAME} PROPERTIES
EXCLUDE_FROM_DEFAULT_BUILD TRUE
diff --git a/domain-server/src/DomainGatekeeper.cpp b/domain-server/src/DomainGatekeeper.cpp
index d78f0aaeb3..47b55bb5c2 100644
--- a/domain-server/src/DomainGatekeeper.cpp
+++ b/domain-server/src/DomainGatekeeper.cpp
@@ -479,7 +479,7 @@ SharedNodePointer DomainGatekeeper::processAgentConnectRequest(const NodeConnect
limitedNodeList->killNodeWithUUID(existingNodeID);
}
- // add the connecting node (or re-use the matched one from eachNodeBreakable above)
+ // add the connecting node
SharedNodePointer newNode = addVerifiedNodeFromConnectRequest(nodeConnection);
// set the edit rights for this user
@@ -508,26 +508,22 @@ SharedNodePointer DomainGatekeeper::processAgentConnectRequest(const NodeConnect
return newNode;
}
-SharedNodePointer DomainGatekeeper::addVerifiedNodeFromConnectRequest(const NodeConnectionData& nodeConnection,
- QUuid nodeID) {
+SharedNodePointer DomainGatekeeper::addVerifiedNodeFromConnectRequest(const NodeConnectionData& nodeConnection) {
HifiSockAddr discoveredSocket = nodeConnection.senderSockAddr;
SharedNetworkPeer connectedPeer = _icePeers.value(nodeConnection.connectUUID);
- if (connectedPeer) {
- // this user negotiated a connection with us via ICE, so re-use their ICE client ID
- nodeID = nodeConnection.connectUUID;
-
- if (connectedPeer->getActiveSocket()) {
- // set their discovered socket to whatever the activated socket on the network peer object was
- discoveredSocket = *connectedPeer->getActiveSocket();
- }
- } else {
- // we got a connectUUID we didn't recognize, either use the hinted node ID or randomly generate a new one
- if (nodeID.isNull()) {
- nodeID = QUuid::createUuid();
- }
+ if (connectedPeer && connectedPeer->getActiveSocket()) {
+ // set their discovered socket to whatever the activated socket on the network peer object was
+ discoveredSocket = *connectedPeer->getActiveSocket();
}
+ // create a new node ID for the verified connecting node
+ auto nodeID = QUuid::createUuid();
+
+ // add a mapping from connection node ID to ICE peer ID
+ // so that we can remove the ICE peer once we see this node connect
+ _nodeToICEPeerIDs.insert(nodeID, nodeConnection.connectUUID);
+
auto limitedNodeList = DependencyManager::get<LimitedNodeList>();
Node::LocalID newLocalID = findOrCreateLocalID(nodeID);
@@ -541,6 +537,15 @@ SharedNodePointer DomainGatekeeper::addVerifiedNodeFromConnectRequest(const Node
return newNode;
}
+void DomainGatekeeper::cleanupICEPeerForNode(const QUuid& nodeID) {
+ // remove this node ID from our node to ICE peer ID map
+ // and the associated ICE peer (if it still exists)
+ auto icePeerID = _nodeToICEPeerIDs.take(nodeID);
+ if (!icePeerID.isNull()) {
+ _icePeers.remove(icePeerID);
+ }
+}
+
bool DomainGatekeeper::verifyUserSignature(const QString& username,
const QByteArray& usernameSignature,
const HifiSockAddr& senderSockAddr) {
diff --git a/domain-server/src/DomainGatekeeper.h b/domain-server/src/DomainGatekeeper.h
index 8402e58559..2cb9b4c8a9 100644
--- a/domain-server/src/DomainGatekeeper.h
+++ b/domain-server/src/DomainGatekeeper.h
@@ -39,8 +39,8 @@ public:
void addPendingAssignedNode(const QUuid& nodeUUID, const QUuid& assignmentUUID,
const QUuid& walletUUID, const QString& nodeVersion);
QUuid assignmentUUIDForPendingAssignment(const QUuid& tempUUID);
-
- void removeICEPeer(const QUuid& peerUUID) { _icePeers.remove(peerUUID); }
+
+ void cleanupICEPeerForNode(const QUuid& nodeID);
Node::LocalID findOrCreateLocalID(const QUuid& uuid);
@@ -77,8 +77,7 @@ private:
SharedNodePointer processAgentConnectRequest(const NodeConnectionData& nodeConnection,
const QString& username,
const QByteArray& usernameSignature);
- SharedNodePointer addVerifiedNodeFromConnectRequest(const NodeConnectionData& nodeConnection,
- QUuid nodeID = QUuid());
+ SharedNodePointer addVerifiedNodeFromConnectRequest(const NodeConnectionData& nodeConnection);
bool verifyUserSignature(const QString& username, const QByteArray& usernameSignature,
const HifiSockAddr& senderSockAddr);
@@ -101,6 +100,10 @@ private:
std::unordered_map _pendingAssignedNodes;
QHash _icePeers;
+
+ using ConnectingNodeID = QUuid;
+ using ICEPeerID = QUuid;
+    QHash<ConnectingNodeID, ICEPeerID> _nodeToICEPeerIDs;
QHash _connectionTokenHash;
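
Note on the new map: per the comments in DomainGatekeeper.cpp above, the intended pattern is insert-on-connect, take-on-cleanup. A minimal, self-contained Qt sketch of that pattern, with names mirroring the members above (illustrative only, not part of the patch; `_icePeers` really maps to network peer objects, a plain QUuid stands in here):

```cpp
#include <QHash>
#include <QUuid>
#include <QDebug>

int main() {
    QHash<QUuid, QUuid> icePeerIDs;        // stands in for _icePeers (keyed by ICE peer ID)
    QHash<QUuid, QUuid> nodeToICEPeerIDs;  // stands in for _nodeToICEPeerIDs

    // An ICE-negotiated peer is already tracked under its connect UUID.
    QUuid iceClientID = QUuid::createUuid();
    icePeerIDs.insert(iceClientID, iceClientID);

    // addVerifiedNodeFromConnectRequest: mint a fresh node ID and remember
    // which ICE peer it came from, so the peer can be removed later.
    QUuid nodeID = QUuid::createUuid();
    nodeToICEPeerIDs.insert(nodeID, iceClientID);

    // cleanupICEPeerForNode: take() removes the mapping and returns a null
    // QUuid if this node never connected via ICE.
    QUuid icePeerID = nodeToICEPeerIDs.take(nodeID);
    if (!icePeerID.isNull()) {
        icePeerIDs.remove(icePeerID);
    }

    qDebug() << "remaining ICE peers:" << icePeerIDs.size(); // 0
    return 0;
}
```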
diff --git a/domain-server/src/DomainServer.cpp b/domain-server/src/DomainServer.cpp
index baeac043e4..4e65df495c 100644
--- a/domain-server/src/DomainServer.cpp
+++ b/domain-server/src/DomainServer.cpp
@@ -1017,15 +1017,22 @@ void DomainServer::processListRequestPacket(QSharedPointer mess
sendingNode->setPublicSocket(nodeRequestData.publicSockAddr);
sendingNode->setLocalSocket(nodeRequestData.localSockAddr);
- // update the NodeInterestSet in case there have been any changes
DomainServerNodeData* nodeData = static_cast<DomainServerNodeData*>(sendingNode->getLinkedData());
+ if (!nodeData->hasCheckedIn()) {
+ nodeData->setHasCheckedIn(true);
+
+ // on first check in, make sure we've cleaned up any ICE peer for this node
+ _gatekeeper.cleanupICEPeerForNode(sendingNode->getUUID());
+ }
+
// guard against patched agents asking to hear about other agents
auto safeInterestSet = nodeRequestData.interestList.toSet();
if (sendingNode->getType() == NodeType::Agent) {
safeInterestSet.remove(NodeType::Agent);
}
+ // update the NodeInterestSet in case there have been any changes
nodeData->setNodeInterestSet(safeInterestSet);
// update the connecting hostname in case it has changed
@@ -2945,7 +2952,7 @@ void DomainServer::nodeAdded(SharedNodePointer node) {
void DomainServer::nodeKilled(SharedNodePointer node) {
// if this peer connected via ICE then remove them from our ICE peers hash
- _gatekeeper.removeICEPeer(node->getUUID());
+ _gatekeeper.cleanupICEPeerForNode(node->getUUID());
DomainServerNodeData* nodeData = static_cast<DomainServerNodeData*>(node->getLinkedData());
@@ -2978,6 +2985,8 @@ void DomainServer::nodeKilled(SharedNodePointer node) {
}
}
}
+
+ broadcastNodeDisconnect(node);
}
SharedAssignmentPointer DomainServer::dequeueMatchingAssignment(const QUuid& assignmentUUID, NodeType_t nodeType) {
@@ -3163,18 +3172,23 @@ void DomainServer::handleKillNode(SharedNodePointer nodeToKill) {
const QUuid& nodeUUID = nodeToKill->getUUID();
limitedNodeList->killNodeWithUUID(nodeUUID);
+}
- static auto removedNodePacket = NLPacket::create(PacketType::DomainServerRemovedNode, NUM_BYTES_RFC4122_UUID);
+void DomainServer::broadcastNodeDisconnect(const SharedNodePointer& disconnectedNode) {
+    auto limitedNodeList = DependencyManager::get<LimitedNodeList>();
+
+ static auto removedNodePacket = NLPacket::create(PacketType::DomainServerRemovedNode, NUM_BYTES_RFC4122_UUID, true);
removedNodePacket->reset();
- removedNodePacket->write(nodeUUID.toRfc4122());
+ removedNodePacket->write(disconnectedNode->getUUID().toRfc4122());
// broadcast out the DomainServerRemovedNode message
- limitedNodeList->eachMatchingNode([this, &nodeToKill](const SharedNodePointer& otherNode) -> bool {
+ limitedNodeList->eachMatchingNode([this, &disconnectedNode](const SharedNodePointer& otherNode) -> bool {
// only send the removed node packet to nodes that care about the type of node this was
- return isInInterestSet(otherNode, nodeToKill);
+ return isInInterestSet(otherNode, disconnectedNode);
}, [&limitedNodeList](const SharedNodePointer& otherNode){
- limitedNodeList->sendUnreliablePacket(*removedNodePacket, *otherNode);
+ auto removedNodePacketCopy = NLPacket::createCopy(*removedNodePacket);
+ limitedNodeList->sendPacket(std::move(removedNodePacketCopy), *otherNode);
});
}
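
The behavioural change in `broadcastNodeDisconnect` is that the removed-node message is now created as a reliable packet, and each recipient gets its own copy via `NLPacket::createCopy` before `sendPacket`, instead of one shared packet sent with `sendUnreliablePacket`. A condensed sketch of just that send loop, using only the calls that appear in this hunk; the types (`NLPacket`, `LimitedNodeList`, `SharedNodePointer`) are assumed from the surrounding codebase and the predicate is a stand-in for `isInInterestSet`:

```cpp
// Sketch of the send loop inside broadcastNodeDisconnect (types from this hunk).
void broadcastRemovedNodeSketch(LimitedNodeList& nodeList, const SharedNodePointer& disconnectedNode) {
    // 'true' in NLPacket::create requests a reliable packet, as in the diff above.
    static auto removedNodePacket =
        NLPacket::create(PacketType::DomainServerRemovedNode, NUM_BYTES_RFC4122_UUID, true);

    removedNodePacket->reset();
    removedNodePacket->write(disconnectedNode->getUUID().toRfc4122());

    nodeList.eachMatchingNode(
        [&](const SharedNodePointer& otherNode) -> bool {
            // stand-in predicate for isInInterestSet(otherNode, disconnectedNode)
            return otherNode->getUUID() != disconnectedNode->getUUID();
        },
        [&](const SharedNodePointer& otherNode) {
            // each recipient gets its own copy, since sendPacket takes ownership of it
            auto packetCopy = NLPacket::createCopy(*removedNodePacket);
            nodeList.sendPacket(std::move(packetCopy), *otherNode);
        });
}
```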
diff --git a/domain-server/src/DomainServer.h b/domain-server/src/DomainServer.h
index b118008d3d..01adbd99a9 100644
--- a/domain-server/src/DomainServer.h
+++ b/domain-server/src/DomainServer.h
@@ -165,6 +165,7 @@ private:
unsigned int countConnectedUsers();
void handleKillNode(SharedNodePointer nodeToKill);
+ void broadcastNodeDisconnect(const SharedNodePointer& disconnnectedNode);
void sendDomainListToNode(const SharedNodePointer& node, const HifiSockAddr& senderSockAddr);
diff --git a/domain-server/src/DomainServerNodeData.h b/domain-server/src/DomainServerNodeData.h
index 6b8e9a1718..f465cceb96 100644
--- a/domain-server/src/DomainServerNodeData.h
+++ b/domain-server/src/DomainServerNodeData.h
@@ -67,8 +67,11 @@ public:
const QString& getPlaceName() { return _placeName; }
void setPlaceName(const QString& placeName) { _placeName = placeName; }
- bool wasAssigned() const { return _wasAssigned; };
+ bool wasAssigned() const { return _wasAssigned; }
void setWasAssigned(bool wasAssigned) { _wasAssigned = wasAssigned; }
+
+ bool hasCheckedIn() const { return _hasCheckedIn; }
+ void setHasCheckedIn(bool hasCheckedIn) { _hasCheckedIn = hasCheckedIn; }
private:
QJsonObject overrideValuesIfNeeded(const QJsonObject& newStats);
@@ -94,6 +97,8 @@ private:
QString _placeName;
bool _wasAssigned { false };
+
+ bool _hasCheckedIn { false };
};
#endif // hifi_DomainServerNodeData_h
diff --git a/interface/resources/qml/controls-uit/TextField.qml b/interface/resources/qml/controls-uit/TextField.qml
index f94541897b..6743d08275 100644
--- a/interface/resources/qml/controls-uit/TextField.qml
+++ b/interface/resources/qml/controls-uit/TextField.qml
@@ -163,10 +163,18 @@ TextField {
text: textField.label
colorScheme: textField.colorScheme
anchors.left: parent.left
- anchors.right: parent.right
+
+ Binding on anchors.right {
+ when: parent.right
+ value: parent.right
+ }
+ Binding on wrapMode {
+ when: parent.right
+ value: Text.WordWrap
+ }
+
anchors.bottom: parent.top
anchors.bottomMargin: 3
- wrapMode: Text.WordWrap
visible: label != ""
}
}
diff --git a/interface/resources/qml/dialogs/+android/FileDialog.qml b/interface/resources/qml/dialogs/+android/FileDialog.qml
index 548ab453a7..86e6e1ef6c 100644
--- a/interface/resources/qml/dialogs/+android/FileDialog.qml
+++ b/interface/resources/qml/dialogs/+android/FileDialog.qml
@@ -57,7 +57,7 @@ ModalWindow {
property int iconSize: 40
property bool selectDirectory: false;
- property bool showHidden: false;
+ property bool showHidden: true;
// FIXME implement
property bool multiSelect: false;
property bool saveDialog: false;
@@ -324,8 +324,10 @@ ModalWindow {
showDirsFirst: true
showDotAndDotDot: false
showFiles: !root.selectDirectory
+ showHidden: root.showHidden
Component.onCompleted: {
showFiles = !root.selectDirectory
+ showHidden = root.showHidden
}
onFolderChanged: {
diff --git a/interface/resources/qml/dialogs/FileDialog.qml b/interface/resources/qml/dialogs/FileDialog.qml
index 154d66378b..49bfe78434 100644
--- a/interface/resources/qml/dialogs/FileDialog.qml
+++ b/interface/resources/qml/dialogs/FileDialog.qml
@@ -58,7 +58,7 @@ ModalWindow {
property int iconSize: 40
property bool selectDirectory: false;
- property bool showHidden: false;
+ property bool showHidden: true;
// FIXME implement
property bool multiSelect: false;
property bool saveDialog: false;
@@ -325,8 +325,10 @@ ModalWindow {
showDirsFirst: true
showDotAndDotDot: false
showFiles: !root.selectDirectory
+ showHidden: root.showHidden
Component.onCompleted: {
showFiles = !root.selectDirectory
+ showHidden = root.showHidden
}
onFolderChanged: {
diff --git a/interface/resources/qml/dialogs/TabletFileDialog.qml b/interface/resources/qml/dialogs/TabletFileDialog.qml
index db15337913..e7c93e6d8e 100644
--- a/interface/resources/qml/dialogs/TabletFileDialog.qml
+++ b/interface/resources/qml/dialogs/TabletFileDialog.qml
@@ -55,7 +55,7 @@ TabletModalWindow {
property int iconSize: 40
property bool selectDirectory: false;
- property bool showHidden: false;
+ property bool showHidden: true;
// FIXME implement
property bool multiSelect: false;
property bool saveDialog: false;
@@ -288,8 +288,10 @@ TabletModalWindow {
showDirsFirst: true
showDotAndDotDot: false
showFiles: !root.selectDirectory
+ showHidden: root.showHidden
Component.onCompleted: {
showFiles = !root.selectDirectory
+ showHidden = root.showHidden
}
onFolderChanged: {
diff --git a/interface/resources/qml/hifi/tablet/tabletWindows/TabletFileDialog.qml b/interface/resources/qml/hifi/tablet/tabletWindows/TabletFileDialog.qml
index 08b0104fce..f3f98f24e5 100644
--- a/interface/resources/qml/hifi/tablet/tabletWindows/TabletFileDialog.qml
+++ b/interface/resources/qml/hifi/tablet/tabletWindows/TabletFileDialog.qml
@@ -52,7 +52,7 @@ Rectangle {
property int iconSize: 40
property bool selectDirectory: false;
- property bool showHidden: false;
+ property bool showHidden: true;
// FIXME implement
property bool multiSelect: false;
property bool saveDialog: false;
@@ -280,8 +280,10 @@ Rectangle {
showDirsFirst: true
showDotAndDotDot: false
showFiles: !root.selectDirectory
+ showHidden: root.showHidden
Component.onCompleted: {
showFiles = !root.selectDirectory
+ showHidden = root.showHidden
}
onFolderChanged: {
diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp
index b37bdffa65..77a20ed2e7 100644
--- a/interface/src/Application.cpp
+++ b/interface/src/Application.cpp
@@ -717,7 +717,7 @@ private:
*     NavigationFocused number number Not used.
*
*
- * @typedef Controller.Hardware-Application
+ * @typedef {object} Controller.Hardware-Application
*/
static const QString STATE_IN_HMD = "InHMD";
@@ -2492,6 +2492,7 @@ void Application::cleanupBeforeQuit() {
}
_window->saveGeometry();
+ _gpuContext->shutdown();
// Destroy third party processes after scripts have finished using them.
#ifdef HAVE_DDE
@@ -3014,9 +3015,11 @@ void Application::onDesktopRootItemCreated(QQuickItem* rootItem) {
auto surfaceContext = DependencyManager::get<OffscreenUi>()->getSurfaceContext();
surfaceContext->setContextProperty("Stats", Stats::getInstance());
+#if !defined(Q_OS_ANDROID)
auto offscreenUi = DependencyManager::get<OffscreenUi>();
auto qml = PathUtils::qmlUrl("AvatarInputsBar.qml");
offscreenUi->show(qml, "AvatarInputsBar");
+#endif
}
void Application::updateCamera(RenderArgs& renderArgs, float deltaTime) {
@@ -4641,12 +4644,6 @@ void Application::idle() {
_overlayConductor.update(secondsSinceLastUpdate);
- auto myAvatar = getMyAvatar();
- if (_myCamera.getMode() == CAMERA_MODE_FIRST_PERSON || _myCamera.getMode() == CAMERA_MODE_THIRD_PERSON) {
- Menu::getInstance()->setIsOptionChecked(MenuOption::FirstPerson, myAvatar->getBoomLength() <= MyAvatar::ZOOM_MIN);
- Menu::getInstance()->setIsOptionChecked(MenuOption::ThirdPerson, !(myAvatar->getBoomLength() <= MyAvatar::ZOOM_MIN));
- cameraMenuChanged();
- }
_gameLoopCounter.increment();
}
@@ -5196,6 +5193,21 @@ void Application::cameraModeChanged() {
cameraMenuChanged();
}
+void Application::changeViewAsNeeded(float boomLength) {
+ // Switch between first and third person views as needed
+ // This is called when the boom length has changed
+ bool boomLengthGreaterThanMinimum = (boomLength > MyAvatar::ZOOM_MIN);
+
+ if (_myCamera.getMode() == CAMERA_MODE_FIRST_PERSON && boomLengthGreaterThanMinimum) {
+ Menu::getInstance()->setIsOptionChecked(MenuOption::FirstPerson, false);
+ Menu::getInstance()->setIsOptionChecked(MenuOption::ThirdPerson, true);
+ cameraMenuChanged();
+ } else if (_myCamera.getMode() == CAMERA_MODE_THIRD_PERSON && !boomLengthGreaterThanMinimum) {
+ Menu::getInstance()->setIsOptionChecked(MenuOption::FirstPerson, true);
+ Menu::getInstance()->setIsOptionChecked(MenuOption::ThirdPerson, false);
+ cameraMenuChanged();
+ }
+}
void Application::cameraMenuChanged() {
auto menu = Menu::getInstance();
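
The new `changeViewAsNeeded` replaces the per-frame menu sync removed from `idle()` with an event-driven check: it only runs when the boom length actually changes (see the MyAvatar.cpp hunk further down) and switches view exactly when the boom length crosses `MyAvatar::ZOOM_MIN`. A standalone sketch of that threshold rule, with an illustrative zoom minimum (the real constant lives in MyAvatar):

```cpp
#include <cassert>

enum class CameraMode { FirstPerson, ThirdPerson, Other };

// Returns the mode the camera should switch to for a given boom length,
// mirroring the decision in Application::changeViewAsNeeded above.
CameraMode viewForBoomLength(CameraMode current, float boomLength, float zoomMin) {
    const bool boomLengthGreaterThanMinimum = (boomLength > zoomMin);
    if (current == CameraMode::FirstPerson && boomLengthGreaterThanMinimum) {
        return CameraMode::ThirdPerson;  // zoomed out of first person
    }
    if (current == CameraMode::ThirdPerson && !boomLengthGreaterThanMinimum) {
        return CameraMode::FirstPerson;  // zoomed all the way back in
    }
    return current;  // any other camera mode is left untouched
}

int main() {
    const float zoomMin = 0.25f;  // illustrative stand-in for MyAvatar::ZOOM_MIN
    assert(viewForBoomLength(CameraMode::FirstPerson, 1.0f, zoomMin) == CameraMode::ThirdPerson);
    assert(viewForBoomLength(CameraMode::ThirdPerson, zoomMin, zoomMin) == CameraMode::FirstPerson);
    assert(viewForBoomLength(CameraMode::Other, 1.0f, zoomMin) == CameraMode::Other);
    return 0;
}
```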
diff --git a/interface/src/Application.h b/interface/src/Application.h
index bd3990a25e..e16774a855 100644
--- a/interface/src/Application.h
+++ b/interface/src/Application.h
@@ -421,6 +421,8 @@ public slots:
void updateVerboseLogging();
+ void changeViewAsNeeded(float boomLength);
+
private slots:
void onDesktopRootItemCreated(QQuickItem* qmlContext);
void onDesktopRootContextCreated(QQmlContext* qmlContext);
diff --git a/interface/src/FancyCamera.h b/interface/src/FancyCamera.h
index bee21bad22..4ca073fb4f 100644
--- a/interface/src/FancyCamera.h
+++ b/interface/src/FancyCamera.h
@@ -25,7 +25,7 @@ class FancyCamera : public Camera {
// FIXME: JSDoc 3.5.5 doesn't augment @property definitions. The following definition is repeated in Camera.h.
/**jsdoc
- * @property cameraEntity {Uuid} The ID of the entity that the camera position and orientation follow when the camera is in
+ * @property {Uuid} cameraEntity The ID of the entity that the camera position and orientation follow when the camera is in
* entity mode.
*/
Q_PROPERTY(QUuid cameraEntity READ getCameraEntity WRITE setCameraEntity)
diff --git a/interface/src/avatar/MyAvatar.cpp b/interface/src/avatar/MyAvatar.cpp
index 5fb4e80b80..d3ab739649 100755
--- a/interface/src/avatar/MyAvatar.cpp
+++ b/interface/src/avatar/MyAvatar.cpp
@@ -2245,9 +2245,15 @@ void MyAvatar::updateActionMotor(float deltaTime) {
_actionMotorVelocity = getSensorToWorldScale() * (_walkSpeed.get() * _walkSpeedScalar) * direction;
}
+ float previousBoomLength = _boomLength;
float boomChange = getDriveKey(ZOOM);
_boomLength += 2.0f * _boomLength * boomChange + boomChange * boomChange;
_boomLength = glm::clamp(_boomLength, ZOOM_MIN, ZOOM_MAX);
+
+ // May need to change view if boom length has changed
+ if (previousBoomLength != _boomLength) {
+ qApp->changeViewAsNeeded(_boomLength);
+ }
}
void MyAvatar::updatePosition(float deltaTime) {
diff --git a/interface/src/avatar/MyAvatar.h b/interface/src/avatar/MyAvatar.h
index a1eb29b38b..bba840d185 100644
--- a/interface/src/avatar/MyAvatar.h
+++ b/interface/src/avatar/MyAvatar.h
@@ -121,7 +121,7 @@ class MyAvatar : public Avatar {
* while flying.
* @property {number} hmdRollControlDeadZone=8 - The amount of HMD roll, in degrees, required before your avatar turns if
*     hmdRollControlEnabled is enabled.
- * @property hmdRollControlRate {number} If hmdRollControlEnabled is true, this value determines the maximum turn rate of
+ * @property {number} hmdRollControlRate If hmdRollControlEnabled is true, this value determines the maximum turn rate of
* your avatar when rolling your HMD in degrees per second.
* @property {number} userHeight=1.75 - The height of the user in sensor space.
* @property {number} userEyeHeight=1.65 - The estimated height of the user's eyes in sensor space. Read-only.
diff --git a/interface/src/raypick/PickScriptingInterface.h b/interface/src/raypick/PickScriptingInterface.h
index a39aa3a4a1..5ef5d27d74 100644
--- a/interface/src/raypick/PickScriptingInterface.h
+++ b/interface/src/raypick/PickScriptingInterface.h
@@ -22,21 +22,22 @@
* @hifi-interface
* @hifi-client-entity
*
- * @property PICK_NOTHING {number} A filter flag. Don't intersect with anything. Read-only.
- * @property PICK_ENTITIES {number} A filter flag. Include entities when intersecting. Read-only.
- * @property PICK_OVERLAYS {number} A filter flag. Include overlays when intersecting. Read-only.
- * @property PICK_AVATARS {number} A filter flag. Include avatars when intersecting. Read-only.
- * @property PICK_HUD {number} A filter flag. Include the HUD sphere when intersecting in HMD mode. Read-only.
- * @property PICK_COARSE {number} A filter flag. Pick against coarse meshes, instead of exact meshes. Read-only.
- * @property PICK_INCLUDE_INVISIBLE {number} A filter flag. Include invisible objects when intersecting. Read-only.
- * @property PICK_INCLUDE_NONCOLLIDABLE {number} A filter flag. Include non-collidable objects when intersecting.
+ * @property {number} PICK_NOTHING A filter flag. Don't intersect with anything. Read-only.
+ * @property {number} PICK_ENTITIES A filter flag. Include entities when intersecting. Read-only.
+ * @property {number} PICK_OVERLAYS A filter flag. Include overlays when intersecting. Read-only.
+ * @property {number} PICK_AVATARS A filter flag. Include avatars when intersecting. Read-only.
+ * @property {number} PICK_HUD A filter flag. Include the HUD sphere when intersecting in HMD mode. Read-only.
+ * @property {number} PICK_COARSE A filter flag. Pick against coarse meshes, instead of exact meshes. Read-only.
+ * @property {number} PICK_INCLUDE_INVISIBLE A filter flag. Include invisible objects when intersecting. Read-only.
+ * @property {number} PICK_INCLUDE_NONCOLLIDABLE A filter flag. Include non-collidable objects when intersecting.
* Read-only.
- * @property PICK_ALL_INTERSECTIONS {number} Read-only.
- * @property INTERSECTED_NONE {number} An intersection type. Intersected nothing with the given filter flags. Read-only.
- * @property INTERSECTED_ENTITY {number} An intersection type. Intersected an entity. Read-only.
- * @property INTERSECTED_OVERLAY {number} An intersection type. Intersected an overlay. Read-only.
- * @property INTERSECTED_AVATAR {number} An intersection type. Intersected an avatar. Read-only.
- * @property INTERSECTED_HUD {number} An intersection type. Intersected the HUD sphere. Read-only.
+ * @property {number} PICK_ALL_INTERSECTIONS Read-only.
+ * @property {number} INTERSECTED_NONE An intersection type. Intersected nothing with the given filter flags.
+ * Read-only.
+ * @property {number} INTERSECTED_ENTITY An intersection type. Intersected an entity. Read-only.
+ * @property {number} INTERSECTED_OVERLAY An intersection type. Intersected an overlay. Read-only.
+ * @property {number} INTERSECTED_AVATAR An intersection type. Intersected an avatar. Read-only.
+ * @property {number} INTERSECTED_HUD An intersection type. Intersected the HUD sphere. Read-only.
* @property {number} perFrameTimeBudget - The max number of usec to spend per frame updating Pick results. Read-only.
*/
@@ -99,7 +100,7 @@ public:
/**jsdoc
* An intersection result for a Ray Pick.
*
- * @typedef {Object} RayPickResult
+ * @typedef {object} RayPickResult
* @property {number} type The intersection type.
* @property {boolean} intersects If there was a valid intersection (type != INTERSECTED_NONE)
* @property {Uuid} objectID The ID of the intersected object. Uuid.NULL for the HUD or invalid intersections.
@@ -113,7 +114,7 @@ public:
/**jsdoc
* An intersection result for a Stylus Pick.
*
- * @typedef {Object} StylusPickResult
+ * @typedef {object} StylusPickResult
* @property {number} type The intersection type.
* @property {boolean} intersects If there was a valid intersection (type != INTERSECTED_NONE)
* @property {Uuid} objectID The ID of the intersected object. Uuid.NULL for the HUD or invalid intersections.
diff --git a/interface/src/raypick/PointerScriptingInterface.cpp b/interface/src/raypick/PointerScriptingInterface.cpp
index b7ac899c8d..4e953a5cb8 100644
--- a/interface/src/raypick/PointerScriptingInterface.cpp
+++ b/interface/src/raypick/PointerScriptingInterface.cpp
@@ -68,14 +68,14 @@ unsigned int PointerScriptingInterface::createStylus(const QVariant& properties)
* A set of properties used to define the visual aspect of a Ray Pointer in the case that the Pointer is not intersecting something. Same as a {@link Pointers.RayPointerRenderState},
* but with an additional distance field.
*
- * @typedef {Object} Pointers.DefaultRayPointerRenderState
+ * @typedef {object} Pointers.DefaultRayPointerRenderState
* @augments Pointers.RayPointerRenderState
* @property {number} distance The distance at which to render the end of this Ray Pointer, if one is defined.
*/
/**jsdoc
* A set of properties used to define the visual aspect of a Ray Pointer in the case that the Pointer is intersecting something.
*
- * @typedef {Object} Pointers.RayPointerRenderState
+ * @typedef {object} Pointers.RayPointerRenderState
* @property {string} name The name of this render state, used by {@link Pointers.setRenderState} and {@link Pointers.editRenderState}
* @property {Overlays.OverlayProperties} [start] All of the properties you would normally pass to {@link Overlays.addOverlay}, plus the type (as a type field).
* An overlay to represent the beginning of the Ray Pointer, if desired.
@@ -87,7 +87,7 @@ unsigned int PointerScriptingInterface::createStylus(const QVariant& properties)
/**jsdoc
* A trigger mechanism for Ray Pointers.
*
- * @typedef {Object} Pointers.Trigger
+ * @typedef {object} Pointers.Trigger
* @property {Controller.Standard|Controller.Actions|function} action This can be a built-in Controller action, like Controller.Standard.LTClick, or a function that evaluates to >= 1.0 when you want to trigger button.
* @property {string} button Which button to trigger. "Primary", "Secondary", "Tertiary", and "Focus" are currently supported. Only "Primary" will trigger clicks on web surfaces. If "Focus" is triggered,
* it will try to set the entity or overlay focus to the object at which the Pointer is aimed. Buttons besides the first three will still trigger events, but event.button will be "None".
diff --git a/interface/src/scripting/WindowScriptingInterface.cpp b/interface/src/scripting/WindowScriptingInterface.cpp
index 6f6e83842c..af9b5c8a46 100644
--- a/interface/src/scripting/WindowScriptingInterface.cpp
+++ b/interface/src/scripting/WindowScriptingInterface.cpp
@@ -522,7 +522,7 @@ int WindowScriptingInterface::openMessageBox(QString title, QString text, int bu
* RestoreDefaults 0x8000000
"Restore Defaults"
*
*
- * @typedef Window.MessageBoxButton
+ * @typedef {number} Window.MessageBoxButton
*/
int WindowScriptingInterface::createMessageBox(QString title, QString text, int buttons, int defaultButton) {
auto messageBox = DependencyManager::get()->createMessageBox(OffscreenUi::ICON_INFORMATION, title, text,
diff --git a/interface/src/scripting/WindowScriptingInterface.h b/interface/src/scripting/WindowScriptingInterface.h
index 1d06f33ec0..5ddbc30dd3 100644
--- a/interface/src/scripting/WindowScriptingInterface.h
+++ b/interface/src/scripting/WindowScriptingInterface.h
@@ -470,7 +470,7 @@ public slots:
*
*
*
- * @typedef Window.DisplayTexture
+ * @typedef {string} Window.DisplayTexture
*/
bool setDisplayTexture(const QString& name);
@@ -523,16 +523,21 @@ public slots:
int openMessageBox(QString title, QString text, int buttons, int defaultButton);
/**jsdoc
- * Open the given resource in the Interface window or in a web browser depending on the url scheme
+ * Open a URL in the Interface window or other application, depending on the URL's scheme. If the URL starts with
+ *     hifi:// then that URL is navigated to in Interface, otherwise the URL is opened in the application the OS
+ *     associates with the URL's scheme (e.g., a Web browser for http://).
* @function Window.openUrl
- * @param {string} url - The resource to open
+ * @param {string} url - The URL to open.
*/
void openUrl(const QUrl& url);
/**jsdoc
- * (Android only) Open the requested Activity and optionally back to the scene when the activity is done
+ * Open an Android activity and optionally return back to the scene when the activity is completed. Android only.
* @function Window.openAndroidActivity
- * @param {string} activityName - The name of the activity to open. One of "Home", "Login" or "Privacy Policy"
+ * @param {string} activityName - The name of the activity to open: one of "Home", "Login", or
+ *     "Privacy Policy".
+ * @param {boolean} backToScene - If true, the user is automatically returned to the scene when the
+ *     activity is completed.
*/
void openAndroidActivity(const QString& activityName, const bool backToScene);
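
The reworded `Window.openUrl` documentation describes scheme-based dispatch: `hifi://` URLs stay inside Interface, everything else is handed to the OS. A tiny illustrative sketch of that decision; the dispatch below is a stand-in, not the actual `openUrl` implementation, and the example URLs are placeholders:

```cpp
#include <QUrl>
#include <QDebug>

// Illustrative dispatch matching the Window.openUrl documentation above.
void openUrlSketch(const QUrl& url) {
    if (url.scheme() == "hifi") {
        qDebug() << "navigate inside Interface to" << url.toString();
    } else {
        qDebug() << "open with the OS handler for scheme" << url.scheme();
    }
}

int main() {
    openUrlSketch(QUrl("hifi://welcome"));           // handled by Interface
    openUrlSketch(QUrl("http://highfidelity.com"));  // opened in a web browser
    return 0;
}
```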
diff --git a/interface/src/ui/overlays/Billboard3DOverlay.cpp b/interface/src/ui/overlays/Billboard3DOverlay.cpp
index 960f0de095..ecade70ef8 100644
--- a/interface/src/ui/overlays/Billboard3DOverlay.cpp
+++ b/interface/src/ui/overlays/Billboard3DOverlay.cpp
@@ -46,6 +46,13 @@ bool Billboard3DOverlay::applyTransformTo(Transform& transform, bool force) {
return transformChanged;
}
+void Billboard3DOverlay::update(float duration) {
+ if (isFacingAvatar()) {
+ _renderVariableDirty = true;
+ }
+ Parent::update(duration);
+}
+
Transform Billboard3DOverlay::evalRenderTransform() {
Transform transform = getTransform();
bool transformChanged = applyTransformTo(transform, true);
diff --git a/interface/src/ui/overlays/Billboard3DOverlay.h b/interface/src/ui/overlays/Billboard3DOverlay.h
index 6b3aa40451..174bc23bc8 100644
--- a/interface/src/ui/overlays/Billboard3DOverlay.h
+++ b/interface/src/ui/overlays/Billboard3DOverlay.h
@@ -18,6 +18,7 @@
class Billboard3DOverlay : public Planar3DOverlay, public PanelAttachable, public Billboardable {
Q_OBJECT
+ using Parent = Planar3DOverlay;
public:
Billboard3DOverlay() {}
@@ -26,6 +27,8 @@ public:
void setProperties(const QVariantMap& properties) override;
QVariant getProperty(const QString& property) override;
+ void update(float duration) override;
+
protected:
virtual bool applyTransformTo(Transform& transform, bool force = false) override;
diff --git a/interface/src/ui/overlays/Image3DOverlay.cpp b/interface/src/ui/overlays/Image3DOverlay.cpp
index df93245922..6e9946e935 100644
--- a/interface/src/ui/overlays/Image3DOverlay.cpp
+++ b/interface/src/ui/overlays/Image3DOverlay.cpp
@@ -51,11 +51,6 @@ void Image3DOverlay::update(float deltatime) {
_texture = DependencyManager::get()->getTexture(_url);
_textureIsLoaded = false;
}
- if (usecTimestampNow() > _transformExpiry) {
- Transform transform = getTransform();
- applyTransformTo(transform);
- setTransform(transform);
- }
Parent::update(deltatime);
}
diff --git a/interface/src/ui/overlays/Overlays.h b/interface/src/ui/overlays/Overlays.h
index 3ff782da99..3debf74f26 100644
--- a/interface/src/ui/overlays/Overlays.h
+++ b/interface/src/ui/overlays/Overlays.h
@@ -53,7 +53,7 @@ const OverlayID UNKNOWN_OVERLAY_ID = OverlayID();
* @property {number} distance - The distance from the {@link PickRay} origin to the intersection point.
* @property {Vec3} surfaceNormal - The normal of the overlay surface at the intersection point.
* @property {Vec3} intersection - The position of the intersection point.
- * @property {Object} extraInfo Additional intersection details, if available.
+ * @property {object} extraInfo Additional intersection details, if available.
*/
class RayToOverlayIntersectionResult {
public:
@@ -482,7 +482,7 @@ public slots:
/**jsdoc
* Check if there is an overlay of a given ID.
- * @function Overlays.isAddedOverly
+ * @function Overlays.isAddedOverlay
* @param {Uuid} overlayID - The ID to check.
* @returns {boolean} true if an overlay with the given ID exists, false otherwise.
*/
diff --git a/interface/src/ui/overlays/Text3DOverlay.cpp b/interface/src/ui/overlays/Text3DOverlay.cpp
index 9c920efb93..b128ce7df7 100644
--- a/interface/src/ui/overlays/Text3DOverlay.cpp
+++ b/interface/src/ui/overlays/Text3DOverlay.cpp
@@ -83,15 +83,6 @@ xColor Text3DOverlay::getBackgroundColor() {
return result;
}
-void Text3DOverlay::update(float deltatime) {
- if (usecTimestampNow() > _transformExpiry) {
- Transform transform = getTransform();
- applyTransformTo(transform);
- setTransform(transform);
- }
- Parent::update(deltatime);
-}
-
void Text3DOverlay::render(RenderArgs* args) {
if (!_renderVisible || !getParentVisible()) {
return; // do nothing if we're not visible
@@ -306,13 +297,4 @@ QSizeF Text3DOverlay::textSize(const QString& text) const {
float pointToWorldScale = (maxHeight / FIXED_FONT_SCALING_RATIO) * _lineHeight;
return QSizeF(extents.x, extents.y) * pointToWorldScale;
-}
-
-bool Text3DOverlay::findRayIntersection(const glm::vec3 &origin, const glm::vec3 &direction, float &distance,
- BoxFace &face, glm::vec3& surfaceNormal) {
- Transform transform = getTransform();
- applyTransformTo(transform, true);
- setTransform(transform);
- return Billboard3DOverlay::findRayIntersection(origin, direction, distance, face, surfaceNormal);
-}
-
+}
\ No newline at end of file
diff --git a/interface/src/ui/overlays/Text3DOverlay.h b/interface/src/ui/overlays/Text3DOverlay.h
index daa5fdc804..21163101d0 100644
--- a/interface/src/ui/overlays/Text3DOverlay.h
+++ b/interface/src/ui/overlays/Text3DOverlay.h
@@ -30,8 +30,6 @@ public:
~Text3DOverlay();
virtual void render(RenderArgs* args) override;
- virtual void update(float deltatime) override;
-
virtual const render::ShapeKey getShapeKey() override;
// getters
@@ -60,9 +58,6 @@ public:
QSizeF textSize(const QString& test) const; // Meters
- virtual bool findRayIntersection(const glm::vec3& origin, const glm::vec3& direction, float& distance,
- BoxFace& face, glm::vec3& surfaceNormal) override;
-
virtual Text3DOverlay* createClone() const override;
private:
diff --git a/interface/src/ui/overlays/Web3DOverlay.cpp b/interface/src/ui/overlays/Web3DOverlay.cpp
index 88cc21efe2..6f8245e575 100644
--- a/interface/src/ui/overlays/Web3DOverlay.cpp
+++ b/interface/src/ui/overlays/Web3DOverlay.cpp
@@ -260,7 +260,6 @@ void Web3DOverlay::setupQmlSurface() {
_webSurface->getSurfaceContext()->setContextProperty("Web3DOverlay", this);
_webSurface->getSurfaceContext()->setContextProperty("Window", DependencyManager::get().data());
_webSurface->getSurfaceContext()->setContextProperty("Reticle", qApp->getApplicationCompositor().getReticleInterface());
- _webSurface->getSurfaceContext()->setContextProperty("desktop", DependencyManager::get()->getDesktop());
_webSurface->getSurfaceContext()->setContextProperty("HiFiAbout", AboutUtil::getInstance());
// Override min fps for tablet UI, for silky smooth scrolling
diff --git a/libraries/animation/src/AnimationCache.h b/libraries/animation/src/AnimationCache.h
index d8f8a13cde..4b0a8901f5 100644
--- a/libraries/animation/src/AnimationCache.h
+++ b/libraries/animation/src/AnimationCache.h
@@ -70,7 +70,7 @@ public:
* @function AnimationCache.prefetch
* @param {string} url - URL of the resource to prefetch.
* @param {object} [extra=null]
- * @returns {Resource}
+ * @returns {ResourceObject}
*/
/**jsdoc
@@ -79,7 +79,7 @@ public:
* @param {string} url - URL of the resource to load.
* @param {string} [fallback=""] - Fallback URL if load of the desired URL fails.
* @param {} [extra=null]
- * @returns {Resource}
+ * @returns {object}
*/
@@ -87,7 +87,7 @@ public:
* Returns animation resource for particular animation.
* @function AnimationCache.getAnimation
* @param {string} url - URL to load.
- * @returns {Resource} animation
+ * @returns {AnimationObject} animation
*/
Q_INVOKABLE AnimationPointer getAnimation(const QString& url) { return getAnimation(QUrl(url)); }
Q_INVOKABLE AnimationPointer getAnimation(const QUrl& url);
@@ -104,6 +104,17 @@ private:
Q_DECLARE_METATYPE(AnimationPointer)
+/**jsdoc
+ * @class AnimationObject
+ *
+ * @hifi-interface
+ * @hifi-client-entity
+ * @hifi-server-entity
+ * @hifi-assignment-client
+ *
+ * @property {string[]} jointNames
+ * @property {FBXAnimationFrame[]} frames
+ */
/// An animation loaded from the network.
class Animation : public Resource {
Q_OBJECT
@@ -118,9 +129,16 @@ public:
virtual bool isLoaded() const override;
-
+ /**jsdoc
+ * @function AnimationObject.getJointNames
+ * @returns {string[]}
+ */
Q_INVOKABLE QStringList getJointNames() const;
+ /**jsdoc
+ * @function AnimationObject.getFrames
+ * @returns {FBXAnimationFrame[]}
+ */
    Q_INVOKABLE QVector<FBXAnimationFrame> getFrames() const;
    const QVector<FBXAnimationFrame>& getFramesReference() const;
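
For readers of the new `AnimationObject` documentation, this is roughly how the same data is reached from C++. The `DependencyManager::get<AnimationCache>()` accessor and the example URL are assumptions for illustration; `getAnimation`, `getJointNames`, and `getFrames` are the calls shown in this hunk:

```cpp
// Sketch: fetch an animation and inspect the fields the AnimationObject JSDoc describes.
auto animationCache = DependencyManager::get<AnimationCache>();                              // assumed accessor
AnimationPointer animation = animationCache->getAnimation("https://example.com/idle.fbx");  // hypothetical URL

if (animation && animation->isLoaded()) {
    QStringList jointNames = animation->getJointNames();          // AnimationObject.getJointNames
    QVector<FBXAnimationFrame> frames = animation->getFrames();   // AnimationObject.getFrames
    qDebug() << "joints:" << jointNames.size() << "frames:" << frames.size();
}
```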
diff --git a/libraries/audio/src/Sound.h b/libraries/audio/src/Sound.h
index 69dbf5a913..4cfdac7792 100644
--- a/libraries/audio/src/Sound.h
+++ b/libraries/audio/src/Sound.h
@@ -77,6 +77,17 @@ private:
typedef QSharedPointer<Sound> SharedSoundPointer;
+/**jsdoc
+ * @class SoundObject
+ *
+ * @hifi-interface
+ * @hifi-client-entity
+ * @hifi-server-entity
+ * @hifi-assignment-client
+ *
+ * @property {boolean} downloaded
+ * @property {number} duration
+ */
class SoundScriptingInterface : public QObject {
Q_OBJECT
@@ -90,6 +101,10 @@ public:
bool isReady() const { return _sound->isReady(); }
float getDuration() { return _sound->getDuration(); }
+/**jsdoc
+ * @function SoundObject.ready
+ * @returns {Signal}
+ */
signals:
void ready();
diff --git a/libraries/audio/src/SoundCache.h b/libraries/audio/src/SoundCache.h
index 347f324353..4352b1d459 100644
--- a/libraries/audio/src/SoundCache.h
+++ b/libraries/audio/src/SoundCache.h
@@ -64,7 +64,7 @@ public:
* @function SoundCache.prefetch
* @param {string} url - URL of the resource to prefetch.
* @param {object} [extra=null]
- * @returns {Resource}
+ * @returns {ResourceObject}
*/
/**jsdoc
@@ -73,14 +73,14 @@ public:
* @param {string} url - URL of the resource to load.
* @param {string} [fallback=""] - Fallback URL if load of the desired URL fails.
* @param {} [extra=null]
- * @returns {Resource}
+ * @returns {object}
*/
/**jsdoc
* @function SoundCache.getSound
* @param {string} url
- * @returns {object}
+ * @returns {SoundObject}
*/
Q_INVOKABLE SharedSoundPointer getSound(const QUrl& url);
protected:
diff --git a/libraries/avatars-renderer/src/avatars-renderer/Avatar.h b/libraries/avatars-renderer/src/avatars-renderer/Avatar.h
index 01114b5f6d..0f48e03e55 100644
--- a/libraries/avatars-renderer/src/avatars-renderer/Avatar.h
+++ b/libraries/avatars-renderer/src/avatars-renderer/Avatar.h
@@ -292,7 +292,7 @@ public:
*/
/**jsdoc
* Information about a single joint in an Avatar's skeleton hierarchy.
- * @typedef MyAvatar.SkeletonJoint
+ * @typedef {object} MyAvatar.SkeletonJoint
* @property {string} name - Joint name.
* @property {number} index - Joint index.
* @property {number} parentIndex - Index of this joint's parent (-1 if no parent).
diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp
index 7a28686f8c..48ef1fb881 100644
--- a/libraries/avatars/src/AvatarData.cpp
+++ b/libraries/avatars/src/AvatarData.cpp
@@ -2363,7 +2363,7 @@ glm::vec3 AvatarData::getAbsoluteJointTranslationInObjectFrame(int index) const
}
/**jsdoc
- * @typedef AttachmentData
+ * @typedef {object} AttachmentData
* @property {string} modelUrl
* @property {string} jointName
* @property {Vec3} translation
diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h
index 62a14ec51e..4946ce45b9 100644
--- a/libraries/avatars/src/AvatarData.h
+++ b/libraries/avatars/src/AvatarData.h
@@ -578,8 +578,7 @@ public:
* @param {Quat} rotation - The rotation of the joint relative to its parent.
* @param {Vec3} translation - The translation of the joint relative to its parent.
* @example Set your avatar to its default T-pose for a while.
- *
- *
+ *
* // Set all joint translations and rotations to defaults.
* var i, length, rotation, translation;
* for (i = 0, length = MyAvatar.getJointNames().length; i < length; i++) {
@@ -680,8 +679,7 @@ public:
* @param {string} name - The name of the joint.
* @param {Quat} rotation - The rotation of the joint relative to its parent.
* @example Set your avatar to its default T-pose then rotate its right arm.
- *
+ *
* // Set all joint translations and rotations to defaults.
* var i, length, rotation, translation;
* for (i = 0, length = MyAvatar.getJointNames().length; i < length; i++) {
@@ -713,8 +711,7 @@ public:
* @param {Vec3} translation - The translation of the joint relative to its parent.
* @example Stretch your avatar's neck. Depending on the avatar you are using, you will either see a gap between
* the head and body or you will see the neck stretched.
- *
+ *
* // Stretch your avatar's neck.
* MyAvatar.setJointTranslation("Neck", { x: 0, y: 25, z: 0 });
*
@@ -798,8 +795,7 @@ public:
* @param {Quat[]} jointRotations - The rotations for all joints in the avatar. The values are in the same order as the
* array returned by {@link MyAvatar.getJointNames} or {@link Avatar.getJointNames}.
* @example Set your avatar to its default T-pose then rotate its right arm.
- *
- *
+ *
* // Set all joint translations and rotations to defaults.
* var i, length, rotation, translation;
* for (i = 0, length = MyAvatar.getJointNames().length; i < length; i++) {
diff --git a/libraries/controllers/src/controllers/Actions.cpp b/libraries/controllers/src/controllers/Actions.cpp
index 978b0888ba..6923ef4b98 100644
--- a/libraries/controllers/src/controllers/Actions.cpp
+++ b/libraries/controllers/src/controllers/Actions.cpp
@@ -307,7 +307,7 @@ namespace controller {
* action.
*
*
- * @typedef Controller.Actions
+ * @typedef {object} Controller.Actions
*/
// Device functions
Input::NamedVector ActionsDevice::getAvailableInputs() const {
diff --git a/libraries/controllers/src/controllers/InputDevice.h b/libraries/controllers/src/controllers/InputDevice.h
index 30a58eb2f0..1e626e6a3c 100644
--- a/libraries/controllers/src/controllers/InputDevice.h
+++ b/libraries/controllers/src/controllers/InputDevice.h
@@ -79,7 +79,7 @@ enum Hand {
* {@link Controller.Hardware-Vive}.
*
*
- * @typedef Controller.Hardware
+ * @typedef {object} Controller.Hardware
* @example List all the currently available Controller.Hardware properties.
* function printProperties(string, item) {
* print(string);
diff --git a/libraries/controllers/src/controllers/StandardController.cpp b/libraries/controllers/src/controllers/StandardController.cpp
index 471943400d..e1733d2524 100644
--- a/libraries/controllers/src/controllers/StandardController.cpp
+++ b/libraries/controllers/src/controllers/StandardController.cpp
@@ -231,7 +231,7 @@ void StandardController::focusOutEvent() {
*
*
*
- * @typedef Controller.Standard
+ * @typedef {object} Controller.Standard
*/
Input::NamedVector StandardController::getAvailableInputs() const {
static Input::NamedVector availableInputs {
diff --git a/libraries/display-plugins/src/display-plugins/OpenGLDisplayPlugin.cpp b/libraries/display-plugins/src/display-plugins/OpenGLDisplayPlugin.cpp
index 354d3242a9..513f955e9e 100644
--- a/libraries/display-plugins/src/display-plugins/OpenGLDisplayPlugin.cpp
+++ b/libraries/display-plugins/src/display-plugins/OpenGLDisplayPlugin.cpp
@@ -46,6 +46,8 @@
const char* SRGB_TO_LINEAR_FRAG = R"SCRIBE(
+// OpenGLDisplayPlugin_present.frag
+
uniform sampler2D colorMap;
in vec2 varTexCoord0;
diff --git a/libraries/entities/src/AnimationPropertyGroup.cpp b/libraries/entities/src/AnimationPropertyGroup.cpp
index 43c6b7a6a5..2db85eb7ac 100644
--- a/libraries/entities/src/AnimationPropertyGroup.cpp
+++ b/libraries/entities/src/AnimationPropertyGroup.cpp
@@ -46,7 +46,7 @@ bool operator!=(const AnimationPropertyGroup& a, const AnimationPropertyGroup& b
/**jsdoc
* The AnimationProperties are used to configure an animation.
- * @typedef Entities.AnimationProperties
+ * @typedef {object} Entities.AnimationProperties
* @property {string} url="" - The URL of the FBX file that has the animation.
* @property {number} fps=30 - The speed in frames/s that the animation is played at.
* @property {number} firstFrame=0 - The first frame to play in the animation.
diff --git a/libraries/entities/src/EntityItemProperties.cpp b/libraries/entities/src/EntityItemProperties.cpp
index 949b39bb7d..3ada45f4a0 100644
--- a/libraries/entities/src/EntityItemProperties.cpp
+++ b/libraries/entities/src/EntityItemProperties.cpp
@@ -486,7 +486,7 @@ EntityPropertyFlags EntityItemProperties::getChangedProperties() const {
* @property {boolean} locked=false - Whether or not the entity can be edited or deleted. If true then the
*     entity's properties other than locked cannot be changed, and the entity cannot be deleted.
* @property {boolean} visible=true - Whether or not the entity is rendered. If true then the entity is rendered.
- * @property {boolean} canCastShadows=true - Whether or not the entity casts shadows. Currently applicable only to
+ * @property {boolean} canCastShadow=true - Whether or not the entity can cast a shadow. Currently applicable only to
* {@link Entities.EntityType|Model} and {@link Entities.EntityType|Shape} entities. Shadows are cast if inside a
*     {@link Entities.EntityType|Zone} entity with castShadows enabled in its
* {@link Entities.EntityProperties-Zone|keyLight} property.
@@ -1398,7 +1398,7 @@ QScriptValue EntityItemProperties::copyToScriptValue(QScriptEngine* engine, bool
/**jsdoc
* The axis-aligned bounding box of an entity.
- * @typedef Entities.BoundingBox
+ * @typedef {object} Entities.BoundingBox
* @property {Vec3} brn - The bottom right near (minimum axes values) corner of the AA box.
* @property {Vec3} tfl - The top far left (maximum axes values) corner of the AA box.
* @property {Vec3} center - The center of the AA box.
diff --git a/libraries/entities/src/KeyLightPropertyGroup.h b/libraries/entities/src/KeyLightPropertyGroup.h
index 2be33787de..b966b78fc7 100644
--- a/libraries/entities/src/KeyLightPropertyGroup.h
+++ b/libraries/entities/src/KeyLightPropertyGroup.h
@@ -35,7 +35,7 @@ class ReadBitstreamToTreeParams;
* @property {Vec3} direction=0,-1,0 - The direction the light is shining.
* @property {boolean} castShadows=false - If true then shadows are cast. Shadows are cast by avatars, plus
* {@link Entities.EntityType|Model} and {@link Entities.EntityType|Shape} entities that have their
- *     {@link Entities.EntityProperties|canCastShadows} property set to true.
+ *     {@link Entities.EntityProperties|canCastShadow} property set to true.
*/
class KeyLightPropertyGroup : public PropertyGroup {
public:
diff --git a/libraries/fbx/src/FBX.h b/libraries/fbx/src/FBX.h
index 239908f86c..fc94236c96 100644
--- a/libraries/fbx/src/FBX.h
+++ b/libraries/fbx/src/FBX.h
@@ -258,6 +258,11 @@ public:
QHash texcoordSetMap;
};
+/**jsdoc
+ * @typedef {object} FBXAnimationFrame
+ * @property {Quat[]} rotations
+ * @property {Vec3[]} translations
+ */
/// A single animation frame extracted from an FBX document.
class FBXAnimationFrame {
public:
diff --git a/libraries/fbx/src/GLTFReader.cpp b/libraries/fbx/src/GLTFReader.cpp
index f322c2319e..1fa4b3873e 100644
--- a/libraries/fbx/src/GLTFReader.cpp
+++ b/libraries/fbx/src/GLTFReader.cpp
@@ -1174,7 +1174,7 @@ bool GLTFReader::addArrayOfType(const QByteArray& bin, int byteOffset, int byteL
break;
}
case GLTFAccessorComponentType::UNSIGNED_INT: {
- readArray(bin, byteOffset, byteLength, outarray, accessorType);
+ readArray(bin, byteOffset, byteLength, outarray, accessorType);
break;
}
case GLTFAccessorComponentType::UNSIGNED_SHORT: {
diff --git a/libraries/fbx/src/GLTFReader.h b/libraries/fbx/src/GLTFReader.h
index 3554594768..28c1d8282f 100644
--- a/libraries/fbx/src/GLTFReader.h
+++ b/libraries/fbx/src/GLTFReader.h
@@ -190,7 +190,7 @@ namespace GLTFBufferViewTarget {
struct GLTFBufferView {
int buffer; //required
int byteLength; //required
- int byteOffset;
+ int byteOffset { 0 };
int target;
QMap defined;
void dump() {
@@ -470,7 +470,7 @@ namespace GLTFAccessorComponentType {
}
struct GLTFAccessor {
int bufferView;
- int byteOffset;
+ int byteOffset { 0 };
int componentType; //required
int count; //required
int type; //required
diff --git a/libraries/gl/src/gl/GLShaders.cpp b/libraries/gl/src/gl/GLShaders.cpp
index ecd6fe3323..9bfe214fcf 100644
--- a/libraries/gl/src/gl/GLShaders.cpp
+++ b/libraries/gl/src/gl/GLShaders.cpp
@@ -2,15 +2,64 @@
#include "GLLogging.h"
-namespace gl {
+#include
+#include
+#include
+#include
+#include
+#include
+
+using namespace gl;
+
+void Uniform::load(GLuint glprogram, int index) {
+ const GLint NAME_LENGTH = 256;
+ GLchar glname[NAME_LENGTH];
+ GLint length = 0;
+ glGetActiveUniform(glprogram, index, NAME_LENGTH, &length, &size, &type, glname);
+ name = std::string(glname, length);
+ location = glGetUniformLocation(glprogram, glname);
+}
+
+Uniforms gl::loadUniforms(GLuint glprogram) {
+ GLint uniformsCount = 0;
+ glGetProgramiv(glprogram, GL_ACTIVE_UNIFORMS, &uniformsCount);
+
+ Uniforms result;
+ result.resize(uniformsCount);
+ for (int i = 0; i < uniformsCount; i++) {
+ result[i].load(glprogram, i);
+ }
+ return result;
+}
#ifdef SEPARATE_PROGRAM
- bool compileShader(GLenum shaderDomain, const std::string& shaderSource, const std::string& defines, GLuint &shaderObject, GLuint &programObject, std::string& message) {
+bool gl::compileShader(GLenum shaderDomain,
+ const std::string& shaderSource,
+ GLuint& shaderObject,
+ GLuint& programObject,
+ std::string& message) {
+    return compileShader(shaderDomain, std::vector<std::string>{ shaderSource }, shaderObject, programObject, message);
+}
#else
- bool compileShader(GLenum shaderDomain, const std::string& shaderSource, const std::string& defines, GLuint &shaderObject, std::string& message) {
+bool gl::compileShader(GLenum shaderDomain, const std::string& shaderSource, GLuint& shaderObject, std::string& message) {
+    return compileShader(shaderDomain, std::vector<std::string>{ shaderSource }, shaderObject, message);
+}
#endif
- if (shaderSource.empty()) {
+
+#ifdef SEPARATE_PROGRAM
+bool gl::compileShader(GLenum shaderDomain,
+                       const std::vector<std::string>& shaderSources,
+ GLuint& shaderObject,
+ GLuint& programObject,
+ std::string& message) {
+#else
+bool gl::compileShader(GLenum shaderDomain,
+                       const std::vector<std::string>& shaderSources,
+ GLuint& shaderObject,
+ std::string& message) {
+#endif
+ if (shaderSources.empty()) {
qCDebug(glLogging) << "GLShader::compileShader - no GLSL shader source code ? so failed to create";
return false;
}
@@ -23,9 +72,11 @@ namespace gl {
}
// Assign the source
- const int NUM_SOURCE_STRINGS = 2;
- const GLchar* srcstr[] = { defines.c_str(), shaderSource.c_str() };
- glShaderSource(glshader, NUM_SOURCE_STRINGS, srcstr, NULL);
+    std::vector<const GLchar*> cstrs;
+ for (const auto& str : shaderSources) {
+ cstrs.push_back(str.c_str());
+ }
+    glShaderSource(glshader, static_cast<GLsizei>(cstrs.size()), cstrs.data(), NULL);
// Compile !
glCompileShader(glshader);
@@ -66,7 +117,7 @@ namespace gl {
qCCritical(glLogging) << "GLShader::compileShader - failed to compile the gl shader object:";
int lineNumber = 0;
- for (auto s : srcstr) {
+ for (const auto& s : cstrs) {
QString str(s);
QStringList lines = str.split("\n");
for (auto& line : lines) {
@@ -142,7 +193,7 @@ namespace gl {
return true;
}
-GLuint compileProgram(const std::vector<GLuint>& glshaders, std::string& message, std::vector<char>& binary) {
+GLuint gl::compileProgram(const std::vector<GLuint>& glshaders, std::string& message, CachedShader& cachedShader) {
// A brand new program:
GLuint glprogram = glCreateProgram();
if (!glprogram) {
@@ -150,14 +201,21 @@ GLuint compileProgram(const std::vector& glshaders, std::string& message
return 0;
}
- // glProgramParameteri(glprogram, GL_PROGRAM_, GL_TRUE);
- // Create the program from the sub shaders
- for (auto so : glshaders) {
- glAttachShader(glprogram, so);
- }
+ bool binaryLoaded = false;
- // Link!
- glLinkProgram(glprogram);
+ if (glshaders.empty() && cachedShader) {
+ glProgramBinary(glprogram, cachedShader.format, cachedShader.binary.data(), (GLsizei)cachedShader.binary.size());
+ binaryLoaded = true;
+ } else {
+ // glProgramParameteri(glprogram, GL_PROGRAM_, GL_TRUE);
+ // Create the program from the sub shaders
+ for (auto so : glshaders) {
+ glAttachShader(glprogram, so);
+ }
+
+ // Link!
+ glLinkProgram(glprogram);
+ }
GLint linked = 0;
glGetProgramiv(glprogram, GL_LINK_STATUS, &linked);
@@ -205,25 +263,73 @@ GLuint compileProgram(const std::vector<GLuint>& glshaders, std::string& message
}
// If linked get the binaries
- if (linked) {
+ if (linked && !binaryLoaded) {
GLint binaryLength = 0;
glGetProgramiv(glprogram, GL_PROGRAM_BINARY_LENGTH, &binaryLength);
-
if (binaryLength > 0) {
- GLint numBinFormats = 0;
- glGetIntegerv(GL_NUM_PROGRAM_BINARY_FORMATS, &numBinFormats);
- if (numBinFormats > 0) {
- binary.resize(binaryLength);
- std::vector<GLint> binFormats(numBinFormats);
- glGetIntegerv(GL_NUM_PROGRAM_BINARY_FORMATS, binFormats.data());
-
- GLenum programBinFormat;
- glGetProgramBinary(glprogram, binaryLength, NULL, &programBinFormat, binary.data());
- }
+ cachedShader.binary.resize(binaryLength);
+ glGetProgramBinary(glprogram, binaryLength, NULL, &cachedShader.format, cachedShader.binary.data());
}
}
return glprogram;
}
+const QString& getShaderCacheFile() {
+ static const QString SHADER_CACHE_FOLDER{ "shaders" };
+ static const QString SHADER_CACHE_FILE_NAME{ "cache.json" };
+ static const QString SHADER_CACHE_FILE = FileUtils::standardPath(SHADER_CACHE_FOLDER) + SHADER_CACHE_FILE_NAME;
+ return SHADER_CACHE_FILE;
+}
+
+static const char* SHADER_JSON_TYPE_KEY = "type";
+static const char* SHADER_JSON_SOURCE_KEY = "source";
+static const char* SHADER_JSON_DATA_KEY = "data";
+
+void gl::loadShaderCache(ShaderCache& cache) {
+ QString shaderCacheFile = getShaderCacheFile();
+ if (QFileInfo(shaderCacheFile).exists()) {
+ QString json = FileUtils::readFile(shaderCacheFile);
+ auto root = QJsonDocument::fromJson(json.toUtf8()).object();
+ for (const auto& qhash : root.keys()) {
+ auto programObject = root[qhash].toObject();
+ QByteArray qbinary = QByteArray::fromBase64(programObject[SHADER_JSON_DATA_KEY].toString().toUtf8());
+ std::string hash = qhash.toStdString();
+ auto& cachedShader = cache[hash];
+ cachedShader.binary.resize(qbinary.size());
+ memcpy(cachedShader.binary.data(), qbinary.data(), qbinary.size());
+ cachedShader.format = (GLenum)programObject[SHADER_JSON_TYPE_KEY].toInt();
+ cachedShader.source = programObject[SHADER_JSON_SOURCE_KEY].toString().toStdString();
+ }
+ }
+}
+
+void gl::saveShaderCache(const ShaderCache& cache) {
+ QByteArray json;
+ {
+ QVariantMap variantMap;
+ for (const auto& entry : cache) {
+ const auto& key = entry.first;
+ const auto& type = entry.second.format;
+ const auto& binary = entry.second.binary;
+ QVariantMap qentry;
+ qentry[SHADER_JSON_TYPE_KEY] = QVariant(type);
+ qentry[SHADER_JSON_SOURCE_KEY] = QString(entry.second.source.c_str());
+ qentry[SHADER_JSON_DATA_KEY] = QByteArray{ binary.data(), (int)binary.size() }.toBase64();
+ variantMap[key.c_str()] = qentry;
+ }
+ json = QJsonDocument::fromVariant(variantMap).toJson(QJsonDocument::Indented);
+ }
+
+ if (!json.isEmpty()) {
+ QString shaderCacheFile = getShaderCacheFile();
+ QFile saveFile(shaderCacheFile);
+ saveFile.open(QFile::WriteOnly | QFile::Text | QFile::Truncate);
+ saveFile.write(json);
+ saveFile.close();
+ }
+}
+
+std::string gl::getShaderHash(const std::string& shaderSource) {
+ return QCryptographicHash::hash(QByteArray(shaderSource.c_str()), QCryptographicHash::Md5).toBase64().toStdString();
}
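// A minimal usage sketch of the new gl::loadUniforms reflection helper, assuming `glprogram`
// is a program object that has already been linked successfully. The function name below is
// illustrative only; the real call sites are GLShader::makeProgram and the
// makeResourceBufferSlots overrides later in this patch.
void dumpActiveUniforms(GLuint glprogram) {
    const gl::Uniforms uniforms = gl::loadUniforms(glprogram);
    for (const auto& uniform : uniforms) {
        // Each entry carries the uniform's name, GL type, array size and resolved location,
        // replacing the hand-rolled glGetActiveUniform/glGetUniformLocation loops.
        qCDebug(glLogging) << uniform.name.c_str() << uniform.type << uniform.size << uniform.location;
    }
}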
diff --git a/libraries/gl/src/gl/GLShaders.h b/libraries/gl/src/gl/GLShaders.h
index fc070d7659..e6c11b4eb3 100644
--- a/libraries/gl/src/gl/GLShaders.h
+++ b/libraries/gl/src/gl/GLShaders.h
@@ -14,15 +14,47 @@
#include <vector>
#include <string>
+#include <unordered_map>
namespace gl {
+
+ struct Uniform {
+ std::string name;
+ GLint size{ -1 };
+ GLenum type{ GL_FLOAT };
+ GLint location{ -1 };
+ void load(GLuint glprogram, int index);
+ };
+
+ using Uniforms = std::vector<Uniform>;
+
+ Uniforms loadUniforms(GLuint glprogram);
+
+ struct CachedShader {
+ GLenum format{ 0 };
+ std::string source;
+ std::vector<char> binary;
+ inline operator bool() const {
+ return format != 0 && !binary.empty();
+ }
+ };
+
+ using ShaderCache = std::unordered_map<std::string, CachedShader>;
+
+ std::string getShaderHash(const std::string& shaderSource);
+ void loadShaderCache(ShaderCache& cache);
+ void saveShaderCache(const ShaderCache& cache);
+
+
#ifdef SEPARATE_PROGRAM
- bool compileShader(GLenum shaderDomain, const std::string& shaderSource, const std::string& defines, GLuint &shaderObject, GLuint &programObject, std::string& message);
+ bool compileShader(GLenum shaderDomain, const std::string& shaderSource, GLuint &shaderObject, GLuint &programObject, std::string& message);
+ bool compileShader(GLenum shaderDomain, const std::vector<std::string>& shaderSources, GLuint &shaderObject, GLuint &programObject, std::string& message);
#else
- bool compileShader(GLenum shaderDomain, const std::string& shaderSource, const std::string& defines, GLuint &shaderObject, std::string& message);
+ bool compileShader(GLenum shaderDomain, const std::string& shaderSource, GLuint &shaderObject, std::string& message);
+ bool compileShader(GLenum shaderDomain, const std::vector<std::string>& shaderSources, GLuint &shaderObject, std::string& message);
#endif
- GLuint compileProgram(const std::vector<GLuint>& glshaders, std::string& message, std::vector<char>& binary);
+ GLuint compileProgram(const std::vector<GLuint>& glshaders, std::string& message, CachedShader& binary);
}
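// A hedged sketch of how the pieces declared above are meant to fit together, mirroring the
// call sites added to GLBackend/GLBackendShader later in this patch. The names
// buildProgramWithCache, shaderObjects and cache are illustrative, not part of the patch;
// the cache itself is expected to be primed once with gl::loadShaderCache() and persisted
// with gl::saveShaderCache() at shutdown.
GLuint buildProgramWithCache(gl::ShaderCache& cache,
                             const std::string& programSource,
                             const std::vector<GLuint>& shaderObjects,
                             std::string& error) {
    const std::string hash = gl::getShaderHash(programSource);
    gl::CachedShader cached;
    if (cache.count(hash) != 0) {
        cached = cache[hash];
    }

    // With a usable cached binary, compileProgram({}, ...) attempts glProgramBinary first.
    GLuint program = 0;
    if (cached) {
        program = gl::compileProgram({}, error, cached);
    }

    // No cache entry, or the driver rejected a stale binary: link from the shader objects and
    // let compileProgram fill `cached` with the freshly retrieved binary for next time.
    if (program == 0) {
        cached = gl::CachedShader();
        program = gl::compileProgram(shaderObjects, error, cached);
        if (program != 0 && cached) {
            cached.source = programSource;
            cache[hash] = cached;
        }
    }
    return program;
}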
diff --git a/libraries/gpu-gl-common/src/gpu/gl/GLBackend.cpp b/libraries/gpu-gl-common/src/gpu/gl/GLBackend.cpp
index f484de57f1..2321342eb4 100644
--- a/libraries/gpu-gl-common/src/gpu/gl/GLBackend.cpp
+++ b/libraries/gpu-gl-common/src/gpu/gl/GLBackend.cpp
@@ -124,13 +124,16 @@ void GLBackend::init() {
GLBackend::GLBackend() {
_pipeline._cameraCorrectionBuffer._buffer->flush();
glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &_uboAlignment);
+ initShaderBinaryCache();
}
+GLBackend::~GLBackend() {}
-GLBackend::~GLBackend() {
+void GLBackend::shutdown() {
killInput();
killTransform();
killTextureManagementStage();
+ killShaderBinaryCache();
}
void GLBackend::renderPassTransfer(const Batch& batch) {
diff --git a/libraries/gpu-gl-common/src/gpu/gl/GLBackend.h b/libraries/gpu-gl-common/src/gpu/gl/GLBackend.h
index 32c75d0363..622c8f1081 100644
--- a/libraries/gpu-gl-common/src/gpu/gl/GLBackend.h
+++ b/libraries/gpu-gl-common/src/gpu/gl/GLBackend.h
@@ -23,6 +23,7 @@
#include
#include
+#include
#include
#include
@@ -71,6 +72,9 @@ public:
virtual ~GLBackend();
+ // Shutdown rendering and persist any required resources
+ void shutdown() override;
+
void setCameraCorrection(const Mat4& correction, const Mat4& prevRenderView, bool reset = false);
void render(const Batch& batch) final override;
@@ -455,6 +459,13 @@ protected:
virtual GLShader* compileBackendProgram(const Shader& program, const Shader::CompilationHandler& handler);
virtual GLShader* compileBackendShader(const Shader& shader, const Shader::CompilationHandler& handler);
virtual std::string getBackendShaderHeader() const = 0;
+ // For a program, this will return a string containing all the source files (without any
+ // backend headers or defines). For a vertex, fragment or geometry shader, this will
+ // return the fully customized shader with all the version and backend specific
+ // preprocessor directives
+ // The program string returned can be used as a key for a cache of shader binaries
+ // The shader strings can be reliably sent to the low level `compileShader` functions
+ virtual std::string getShaderSource(const Shader& shader, int version) final;
virtual void makeProgramBindings(ShaderObject& shaderObject);
class ElementResource {
public:
@@ -465,12 +476,12 @@ protected:
ElementResource getFormatFromGLUniform(GLenum gltype);
static const GLint UNUSED_SLOT {-1};
static bool isUnusedSlot(GLint binding) { return (binding == UNUSED_SLOT); }
- virtual int makeUniformSlots(GLuint glprogram, const Shader::BindingSet& slotBindings,
+ virtual int makeUniformSlots(const ShaderObject& program, const Shader::BindingSet& slotBindings,
Shader::SlotSet& uniforms, Shader::SlotSet& textures, Shader::SlotSet& samplers);
- virtual int makeUniformBlockSlots(GLuint glprogram, const Shader::BindingSet& slotBindings, Shader::SlotSet& buffers);
- virtual int makeResourceBufferSlots(GLuint glprogram, const Shader::BindingSet& slotBindings, Shader::SlotSet& resourceBuffers) = 0;
- virtual int makeInputSlots(GLuint glprogram, const Shader::BindingSet& slotBindings, Shader::SlotSet& inputs);
- virtual int makeOutputSlots(GLuint glprogram, const Shader::BindingSet& slotBindings, Shader::SlotSet& outputs);
+ virtual int makeUniformBlockSlots(const ShaderObject& program, const Shader::BindingSet& slotBindings, Shader::SlotSet& buffers);
+ virtual int makeResourceBufferSlots(const ShaderObject& program, const Shader::BindingSet& slotBindings, Shader::SlotSet& resourceBuffers) = 0;
+ virtual int makeInputSlots(const ShaderObject& program, const Shader::BindingSet& slotBindings, Shader::SlotSet& inputs);
+ virtual int makeOutputSlots(const ShaderObject& program, const Shader::BindingSet& slotBindings, Shader::SlotSet& outputs);
// Synchronize the state cache of this Backend with the actual real state of the GL Context
@@ -489,6 +500,19 @@ protected:
void resetStages();
+ // Stores cached binary versions of the shaders for quicker startup on subsequent runs
+ // Note that shaders in the cache can still fail to load due to hardware or driver
+ // changes that invalidate the cached binary, in which case we fall back on compiling
+ // the source again
+ struct ShaderBinaryCache {
+ std::mutex _mutex;
+ std::vector<GLint> _formats;
+ std::unordered_map<std::string, ::gl::CachedShader> _binaries;
+ } _shaderBinaryCache;
+
+ virtual void initShaderBinaryCache();
+ virtual void killShaderBinaryCache();
+
struct TextureManagementStageState {
bool _sparseCapable { false };
GLTextureTransferEnginePointer _transferEngine;
diff --git a/libraries/gpu-gl-common/src/gpu/gl/GLBackendShader.cpp b/libraries/gpu-gl-common/src/gpu/gl/GLBackendShader.cpp
index bf36c134de..af6a0df297 100644
--- a/libraries/gpu-gl-common/src/gpu/gl/GLBackendShader.cpp
+++ b/libraries/gpu-gl-common/src/gpu/gl/GLBackendShader.cpp
@@ -11,6 +11,8 @@
using namespace gpu;
using namespace gpu::gl;
+using CachedShader = ::gl::CachedShader;
+
// Shader domain
static const size_t NUM_SHADER_DOMAINS = 3;
@@ -68,9 +70,45 @@ static const std::array VERSION_DEFINES { {
stereoVersion
} };
+static std::string getShaderTypeString(Shader::Type type) {
+ switch (type) {
+ case Shader::Type::VERTEX:
+ return "vertex";
+ case Shader::Type::PIXEL:
+ return "pixel";
+ case Shader::Type::GEOMETRY:
+ return "geometry";
+ case Shader::Type::PROGRAM:
+ return "program";
+ default:
+ qFatal("Unexpected shader type %d", type);
+ Q_UNREACHABLE();
+ }
+}
+
+std::string GLBackend::getShaderSource(const Shader& shader, int version) {
+ if (shader.isProgram()) {
+ std::string result;
+ result.append("// VERSION " + std::to_string(version));
+ for (const auto& subShader : shader.getShaders()) {
+ result.append("//-------- ");
+ result.append(getShaderTypeString(subShader->getType()));
+ result.append("\n");
+ result.append(subShader->getSource().getCode());
+ }
+ return result;
+ }
+
+ std::string shaderDefines = getBackendShaderHeader() + "\n"
+ + (supportsBindless() ? textureTableVersion : "\n")
+ + DOMAIN_DEFINES[shader.getType()] + "\n"
+ + VERSION_DEFINES[version];
+
+ return shaderDefines + "\n" + shader.getSource().getCode();
+}
+
GLShader* GLBackend::compileBackendShader(const Shader& shader, const Shader::CompilationHandler& handler) {
// Any GLSLprogram ? normally yes...
- const std::string& shaderSource = shader.getSource().getCode();
GLenum shaderDomain = SHADER_DOMAINS[shader.getType()];
GLShader::ShaderObjects shaderObjects;
Shader::CompilationLogs compilationLogs(GLShader::NumVersions);
@@ -78,11 +116,7 @@ GLShader* GLBackend::compileBackendShader(const Shader& shader, const Shader::Co
for (int version = 0; version < GLShader::NumVersions; version++) {
auto& shaderObject = shaderObjects[version];
-
- std::string shaderDefines = getBackendShaderHeader() + "\n"
- + (supportsBindless() ? textureTableVersion : "\n")
- + DOMAIN_DEFINES[shader.getType()] + "\n"
- + VERSION_DEFINES[version];
+ auto shaderSource = getShaderSource(shader, version);
if (handler) {
bool retest = true;
std::string currentSrc = shaderSource;
@@ -90,7 +124,7 @@ GLShader* GLBackend::compileBackendShader(const Shader& shader, const Shader::Co
// The retest bool is set to false as soon as the compilation succeeds, in order to exit the while loop.
// The handler tells us if we should retry or not while returning a modified version of the source.
while (retest) {
- bool result = ::gl::compileShader(shaderDomain, currentSrc, shaderDefines, shaderObject.glshader, compilationLogs[version].message);
+ bool result = ::gl::compileShader(shaderDomain, currentSrc, shaderObject.glshader, compilationLogs[version].message);
compilationLogs[version].compiled = result;
if (!result) {
std::string newSrc;
@@ -101,7 +135,7 @@ GLShader* GLBackend::compileBackendShader(const Shader& shader, const Shader::Co
}
}
} else {
- compilationLogs[version].compiled = ::gl::compileShader(shaderDomain, shaderSource, shaderDefines, shaderObject.glshader, compilationLogs[version].message);
+ compilationLogs[version].compiled = ::gl::compileShader(shaderDomain, shaderSource, shaderObject.glshader, compilationLogs[version].message);
}
if (!compilationLogs[version].compiled) {
@@ -120,43 +154,80 @@ GLShader* GLBackend::compileBackendShader(const Shader& shader, const Shader::Co
return object;
}
+std::atomic<size_t> gpuBinaryShadersLoaded;
+
GLShader* GLBackend::compileBackendProgram(const Shader& program, const Shader::CompilationHandler& handler) {
if (!program.isProgram()) {
return nullptr;
}
GLShader::ShaderObjects programObjects;
-
program.incrementCompilationAttempt();
Shader::CompilationLogs compilationLogs(GLShader::NumVersions);
for (int version = 0; version < GLShader::NumVersions; version++) {
auto& programObject = programObjects[version];
+ auto programSource = getShaderSource(program, version);
+ auto hash = ::gl::getShaderHash(programSource);
- // Let's go through every shaders and make sure they are ready to go
- std::vector< GLuint > shaderGLObjects;
- for (auto subShader : program.getShaders()) {
- auto object = GLShader::sync((*this), *subShader, handler);
- if (object) {
- shaderGLObjects.push_back(object->_shaderObjects[version].glshader);
- } else {
- qCWarning(gpugllogging) << "GLBackend::compileBackendProgram - One of the shaders of the program is not compiled?";
- compilationLogs[version].compiled = false;
- compilationLogs[version].message = std::string("Failed to compile, one of the shaders of the program is not compiled ?");
- program.setCompilationLogs(compilationLogs);
- return nullptr;
+ CachedShader cachedBinary;
+ {
+ Lock shaderCacheLock{ _shaderBinaryCache._mutex };
+ if (_shaderBinaryCache._binaries.count(hash) != 0) {
+ cachedBinary = _shaderBinaryCache._binaries[hash];
+ }
+ }
+
+
+ GLuint glprogram = 0;
+
+ // If we have a cached binary program, try to load it instead of compiling the individual shaders
+ if (cachedBinary) {
+ glprogram = ::gl::compileProgram({}, compilationLogs[version].message, cachedBinary);
+ if (0 != glprogram) {
+ ++gpuBinaryShadersLoaded;
+ }
+ }
+
+ // If we have no program, then either no cached binary, or the binary failed to load (perhaps a GPU driver update invalidated the cache)
+ if (0 == glprogram) {
+ cachedBinary = CachedShader();
+ {
+ std::unique_lock<std::mutex> shaderCacheLock{ _shaderBinaryCache._mutex };
+ _shaderBinaryCache._binaries.erase(hash);
+ }
+ // Let's go through every shader and make sure they are ready to go
+ std::vector<GLuint> shaderGLObjects;
+ shaderGLObjects.reserve(program.getShaders().size());
+ for (auto subShader : program.getShaders()) {
+ auto object = GLShader::sync((*this), *subShader, handler);
+ if (object) {
+ shaderGLObjects.push_back(object->_shaderObjects[version].glshader);
+ } else {
+ qCWarning(gpugllogging) << "GLBackend::compileBackendProgram - One of the shaders of the program is not compiled?";
+ compilationLogs[version].compiled = false;
+ compilationLogs[version].message = std::string("Failed to compile, one of the shaders of the program is not compiled ?");
+ program.setCompilationLogs(compilationLogs);
+ return nullptr;
+ }
+ }
+
+ glprogram = ::gl::compileProgram(shaderGLObjects, compilationLogs[version].message, cachedBinary);
+ if (cachedBinary) {
+ cachedBinary.source = programSource;
+ std::unique_lock<std::mutex> shaderCacheLock{ _shaderBinaryCache._mutex };
+ _shaderBinaryCache._binaries[hash] = cachedBinary;
}
}
- GLuint glprogram = ::gl::compileProgram(shaderGLObjects, compilationLogs[version].message, compilationLogs[version].binary);
if (glprogram == 0) {
qCWarning(gpugllogging) << "GLBackend::compileBackendProgram - Program didn't link:\n" << compilationLogs[version].message.c_str();
program.setCompilationLogs(compilationLogs);
return nullptr;
}
+
compilationLogs[version].compiled = true;
programObject.glprogram = glprogram;
-
makeProgramBindings(programObject);
}
// Compilation feedback
@@ -338,20 +409,15 @@ GLBackend::ElementResource GLBackend::getFormatFromGLUniform(GLenum gltype) {
};
-int GLBackend::makeUniformSlots(GLuint glprogram, const Shader::BindingSet& slotBindings,
+int GLBackend::makeUniformSlots(const ShaderObject& shaderProgram, const Shader::BindingSet& slotBindings,
Shader::SlotSet& uniforms, Shader::SlotSet& textures, Shader::SlotSet& samplers) {
- GLint uniformsCount = 0;
+ auto& glprogram = shaderProgram.glprogram;
- glGetProgramiv(glprogram, GL_ACTIVE_UNIFORMS, &uniformsCount);
-
- for (int i = 0; i < uniformsCount; i++) {
- const GLint NAME_LENGTH = 256;
- GLchar name[NAME_LENGTH];
- GLint length = 0;
- GLint size = 0;
- GLenum type = 0;
- glGetActiveUniform(glprogram, i, NAME_LENGTH, &length, &size, &type, name);
- GLint location = glGetUniformLocation(glprogram, name);
+ for (const auto& uniform : shaderProgram.uniforms) {
+ const auto& type = uniform.type;
+ const auto& location = uniform.location;
+ const auto& size = uniform.size;
+ const auto& name = uniform.name;
const GLint INVALID_UNIFORM_LOCATION = -1;
// Try to make sense of the gltype
@@ -359,8 +425,8 @@ int GLBackend::makeUniformSlots(GLuint glprogram, const Shader::BindingSet& slot
// The uniform as a standard var type
if (location != INVALID_UNIFORM_LOCATION) {
+ auto sname = uniform.name;
// Let's make sure the name doesn't contains an array element
- std::string sname(name);
auto foundBracket = sname.find_first_of('[');
if (foundBracket != std::string::npos) {
// std::string arrayname = sname.substr(0, foundBracket);
@@ -397,10 +463,11 @@ int GLBackend::makeUniformSlots(GLuint glprogram, const Shader::BindingSet& slot
}
}
- return uniformsCount;
+ return static_cast<int>(shaderProgram.uniforms.size());
}
-int GLBackend::makeUniformBlockSlots(GLuint glprogram, const Shader::BindingSet& slotBindings, Shader::SlotSet& buffers) {
+int GLBackend::makeUniformBlockSlots(const ShaderObject& shaderProgram, const Shader::BindingSet& slotBindings, Shader::SlotSet& buffers) {
+ const auto& glprogram = shaderProgram.glprogram;
GLint buffersCount = 0;
glGetProgramiv(glprogram, GL_ACTIVE_UNIFORM_BLOCKS, &buffersCount);
@@ -479,7 +546,8 @@ int GLBackend::makeUniformBlockSlots(GLuint glprogram, const Shader::BindingSet&
return buffersCount;
}
-int GLBackend::makeInputSlots(GLuint glprogram, const Shader::BindingSet& slotBindings, Shader::SlotSet& inputs) {
+int GLBackend::makeInputSlots(const ShaderObject& shaderProgram, const Shader::BindingSet& slotBindings, Shader::SlotSet& inputs) {
+ const auto& glprogram = shaderProgram.glprogram;
GLint inputsCount = 0;
glGetProgramiv(glprogram, GL_ACTIVE_ATTRIBUTES, &inputsCount);
@@ -501,7 +569,7 @@ int GLBackend::makeInputSlots(GLuint glprogram, const Shader::BindingSet& slotBi
return inputsCount;
}
-int GLBackend::makeOutputSlots(GLuint glprogram, const Shader::BindingSet& slotBindings, Shader::SlotSet& outputs) {
+int GLBackend::makeOutputSlots(const ShaderObject& shaderProgram, const Shader::BindingSet& slotBindings, Shader::SlotSet& outputs) {
/* GLint outputsCount = 0;
glGetProgramiv(glprogram, GL_ACTIVE_, &outputsCount);
@@ -525,67 +593,19 @@ void GLBackend::makeProgramBindings(ShaderObject& shaderObject) {
if (!shaderObject.glprogram) {
return;
}
- GLuint glprogram = shaderObject.glprogram;
- GLint loc = -1;
-
- //Check for gpu specific attribute slotBindings
- loc = glGetAttribLocation(glprogram, "inPosition");
- if (loc >= 0 && loc != gpu::Stream::POSITION) {
- glBindAttribLocation(glprogram, gpu::Stream::POSITION, "inPosition");
- }
-
- loc = glGetAttribLocation(glprogram, "inNormal");
- if (loc >= 0 && loc != gpu::Stream::NORMAL) {
- glBindAttribLocation(glprogram, gpu::Stream::NORMAL, "inNormal");
- }
-
- loc = glGetAttribLocation(glprogram, "inColor");
- if (loc >= 0 && loc != gpu::Stream::COLOR) {
- glBindAttribLocation(glprogram, gpu::Stream::COLOR, "inColor");
- }
-
- loc = glGetAttribLocation(glprogram, "inTexCoord0");
- if (loc >= 0 && loc != gpu::Stream::TEXCOORD) {
- glBindAttribLocation(glprogram, gpu::Stream::TEXCOORD, "inTexCoord0");
- }
-
- loc = glGetAttribLocation(glprogram, "inTangent");
- if (loc >= 0 && loc != gpu::Stream::TANGENT) {
- glBindAttribLocation(glprogram, gpu::Stream::TANGENT, "inTangent");
- }
-
- char attribName[] = "inTexCoordn";
- for (auto i = 0; i < 4; i++) {
- auto streamId = gpu::Stream::TEXCOORD1 + i;
-
- attribName[strlen(attribName) - 1] = '1' + i;
- loc = glGetAttribLocation(glprogram, attribName);
- if (loc >= 0 && loc != streamId) {
- glBindAttribLocation(glprogram, streamId, attribName);
- }
- }
-
- loc = glGetAttribLocation(glprogram, "inSkinClusterIndex");
- if (loc >= 0 && loc != gpu::Stream::SKIN_CLUSTER_INDEX) {
- glBindAttribLocation(glprogram, gpu::Stream::SKIN_CLUSTER_INDEX, "inSkinClusterIndex");
- }
-
- loc = glGetAttribLocation(glprogram, "inSkinClusterWeight");
- if (loc >= 0 && loc != gpu::Stream::SKIN_CLUSTER_WEIGHT) {
- glBindAttribLocation(glprogram, gpu::Stream::SKIN_CLUSTER_WEIGHT, "inSkinClusterWeight");
- }
-
- loc = glGetAttribLocation(glprogram, "_drawCallInfo");
- if (loc >= 0 && loc != gpu::Stream::DRAW_CALL_INFO) {
- glBindAttribLocation(glprogram, gpu::Stream::DRAW_CALL_INFO, "_drawCallInfo");
- }
-
- // Link again to take into account the assigned attrib location
- glLinkProgram(glprogram);
-
- GLint linked = 0;
- glGetProgramiv(glprogram, GL_LINK_STATUS, &linked);
- if (!linked) {
- qCWarning(gpugllogging) << "GLShader::makeBindings - failed to link after assigning slotBindings?";
- }
+}
+
+
+void GLBackend::initShaderBinaryCache() {
+ GLint numBinFormats = 0;
+ glGetIntegerv(GL_NUM_PROGRAM_BINARY_FORMATS, &numBinFormats);
+ if (numBinFormats > 0) {
+ _shaderBinaryCache._formats.resize(numBinFormats);
+ glGetIntegerv(GL_PROGRAM_BINARY_FORMATS, _shaderBinaryCache._formats.data());
+ }
+ ::gl::loadShaderCache(_shaderBinaryCache._binaries);
+}
+
+void GLBackend::killShaderBinaryCache() {
+ ::gl::saveShaderCache(_shaderBinaryCache._binaries);
}
diff --git a/libraries/gpu-gl-common/src/gpu/gl/GLShader.cpp b/libraries/gpu-gl-common/src/gpu/gl/GLShader.cpp
index 010a7c479c..0a527185ef 100644
--- a/libraries/gpu-gl-common/src/gpu/gl/GLShader.cpp
+++ b/libraries/gpu-gl-common/src/gpu/gl/GLShader.cpp
@@ -68,22 +68,23 @@ bool GLShader::makeProgram(GLBackend& backend, Shader& shader, const Shader::Bin
for (int version = 0; version < GLShader::NumVersions; version++) {
auto& shaderObject = object->_shaderObjects[version];
if (shaderObject.glprogram) {
+ shaderObject.uniforms = ::gl::loadUniforms(shaderObject.glprogram);
Shader::SlotSet buffers;
- backend.makeUniformBlockSlots(shaderObject.glprogram, slotBindings, buffers);
+ backend.makeUniformBlockSlots(shaderObject, slotBindings, buffers);
Shader::SlotSet uniforms;
Shader::SlotSet textures;
Shader::SlotSet samplers;
- backend.makeUniformSlots(shaderObject.glprogram, slotBindings, uniforms, textures, samplers);
+ backend.makeUniformSlots(shaderObject, slotBindings, uniforms, textures, samplers);
Shader::SlotSet resourceBuffers;
- backend.makeResourceBufferSlots(shaderObject.glprogram, slotBindings, resourceBuffers);
+ backend.makeResourceBufferSlots(shaderObject, slotBindings, resourceBuffers);
Shader::SlotSet inputs;
- backend.makeInputSlots(shaderObject.glprogram, slotBindings, inputs);
+ backend.makeInputSlots(shaderObject, slotBindings, inputs);
Shader::SlotSet outputs;
- backend.makeOutputSlots(shaderObject.glprogram, slotBindings, outputs);
+ backend.makeOutputSlots(shaderObject, slotBindings, outputs);
// Define the public slots only from the default version
if (version == 0) {
diff --git a/libraries/gpu-gl-common/src/gpu/gl/GLShader.h b/libraries/gpu-gl-common/src/gpu/gl/GLShader.h
index 3259982e93..0ba77e50c6 100644
--- a/libraries/gpu-gl-common/src/gpu/gl/GLShader.h
+++ b/libraries/gpu-gl-common/src/gpu/gl/GLShader.h
@@ -9,14 +9,17 @@
#define hifi_gpu_gl_GLShader_h
#include "GLShared.h"
+#include <gl/GLShaders.h>
namespace gpu { namespace gl {
struct ShaderObject {
+ using Uniforms = ::gl::Uniforms;
GLuint glshader { 0 };
GLuint glprogram { 0 };
GLint transformCameraSlot { -1 };
GLint transformObjectSlot { -1 };
+ Uniforms uniforms;
};
class GLShader : public GPUObject {
diff --git a/libraries/gpu-gl/src/gpu/gl41/GL41Backend.h b/libraries/gpu-gl/src/gpu/gl41/GL41Backend.h
index 23dcac0d8d..e840b9fe78 100644
--- a/libraries/gpu-gl/src/gpu/gl41/GL41Backend.h
+++ b/libraries/gpu-gl/src/gpu/gl41/GL41Backend.h
@@ -173,7 +173,7 @@ protected:
std::string getBackendShaderHeader() const override;
void makeProgramBindings(ShaderObject& shaderObject) override;
- int makeResourceBufferSlots(GLuint glprogram, const Shader::BindingSet& slotBindings,Shader::SlotSet& resourceBuffers) override;
+ int makeResourceBufferSlots(const ShaderObject& shaderProgram, const Shader::BindingSet& slotBindings,Shader::SlotSet& resourceBuffers) override;
};
diff --git a/libraries/gpu-gl/src/gpu/gl41/GL41BackendShader.cpp b/libraries/gpu-gl/src/gpu/gl41/GL41BackendShader.cpp
index 64c9033cf7..0fa1b1bf42 100644
--- a/libraries/gpu-gl/src/gpu/gl41/GL41BackendShader.cpp
+++ b/libraries/gpu-gl/src/gpu/gl41/GL41BackendShader.cpp
@@ -22,20 +22,13 @@ std::string GL41Backend::getBackendShaderHeader() const {
return header;
}
-int GL41Backend::makeResourceBufferSlots(GLuint glprogram, const Shader::BindingSet& slotBindings,Shader::SlotSet& resourceBuffers) {
+int GL41Backend::makeResourceBufferSlots(const ShaderObject& shaderProgram, const Shader::BindingSet& slotBindings,Shader::SlotSet& resourceBuffers) {
GLint ssboCount = 0;
- GLint uniformsCount = 0;
-
- glGetProgramiv(glprogram, GL_ACTIVE_UNIFORMS, &uniformsCount);
-
- for (int i = 0; i < uniformsCount; i++) {
- const GLint NAME_LENGTH = 256;
- GLchar name[NAME_LENGTH];
- GLint length = 0;
- GLint size = 0;
- GLenum type = 0;
- glGetActiveUniform(glprogram, i, NAME_LENGTH, &length, &size, &type, name);
- GLint location = glGetUniformLocation(glprogram, name);
+ const auto& glprogram = shaderProgram.glprogram;
+ for (const auto& uniform : shaderProgram.uniforms) {
+ const auto& name = uniform.name;
+ const auto& type = uniform.type;
+ const auto& location = uniform.location;
const GLint INVALID_UNIFORM_LOCATION = -1;
// Try to make sense of the gltype
diff --git a/libraries/gpu-gl/src/gpu/gl45/GL45Backend.h b/libraries/gpu-gl/src/gpu/gl45/GL45Backend.h
index 0db9271f57..cb7ddce930 100644
--- a/libraries/gpu-gl/src/gpu/gl45/GL45Backend.h
+++ b/libraries/gpu-gl/src/gpu/gl45/GL45Backend.h
@@ -274,7 +274,7 @@ protected:
// Shader Stage
std::string getBackendShaderHeader() const override;
void makeProgramBindings(ShaderObject& shaderObject) override;
- int makeResourceBufferSlots(GLuint glprogram, const Shader::BindingSet& slotBindings,Shader::SlotSet& resourceBuffers) override;
+ int makeResourceBufferSlots(const ShaderObject& shaderProgram, const Shader::BindingSet& slotBindings,Shader::SlotSet& resourceBuffers) override;
// Texture Management Stage
void initTextureManagementStage() override;
diff --git a/libraries/gpu-gl/src/gpu/gl45/GL45BackendShader.cpp b/libraries/gpu-gl/src/gpu/gl45/GL45BackendShader.cpp
index c2b1c8e1af..f1f388d501 100644
--- a/libraries/gpu-gl/src/gpu/gl45/GL45BackendShader.cpp
+++ b/libraries/gpu-gl/src/gpu/gl45/GL45BackendShader.cpp
@@ -27,7 +27,8 @@ std::string GL45Backend::getBackendShaderHeader() const {
return header;
}
-int GL45Backend::makeResourceBufferSlots(GLuint glprogram, const Shader::BindingSet& slotBindings,Shader::SlotSet& resourceBuffers) {
+int GL45Backend::makeResourceBufferSlots(const ShaderObject& shaderProgram, const Shader::BindingSet& slotBindings,Shader::SlotSet& resourceBuffers) {
+ const auto& glprogram = shaderProgram.glprogram;
GLint buffersCount = 0;
glGetProgramInterfaceiv(glprogram, GL_SHADER_STORAGE_BLOCK, GL_ACTIVE_RESOURCES, &buffersCount);
diff --git a/libraries/gpu-gles/src/gpu/gles/GLESBackend.h b/libraries/gpu-gles/src/gpu/gles/GLESBackend.h
index cb8e4abb29..9656d29ac5 100644
--- a/libraries/gpu-gles/src/gpu/gles/GLESBackend.h
+++ b/libraries/gpu-gles/src/gpu/gles/GLESBackend.h
@@ -164,7 +164,7 @@ protected:
std::string getBackendShaderHeader() const override;
void makeProgramBindings(ShaderObject& shaderObject) override;
- int makeResourceBufferSlots(GLuint glprogram, const Shader::BindingSet& slotBindings,Shader::SlotSet& resourceBuffers) override;
+ int makeResourceBufferSlots(const ShaderObject& shaderObject, const Shader::BindingSet& slotBindings,Shader::SlotSet& resourceBuffers) override;
};
diff --git a/libraries/gpu-gles/src/gpu/gles/GLESBackendShader.cpp b/libraries/gpu-gles/src/gpu/gles/GLESBackendShader.cpp
index 7e8056ba79..34caa97696 100644
--- a/libraries/gpu-gles/src/gpu/gles/GLESBackendShader.cpp
+++ b/libraries/gpu-gles/src/gpu/gles/GLESBackendShader.cpp
@@ -25,20 +25,15 @@ std::string GLESBackend::getBackendShaderHeader() const {
return header;
}
-int GLESBackend::makeResourceBufferSlots(GLuint glprogram, const Shader::BindingSet& slotBindings,Shader::SlotSet& resourceBuffers) {
+int GLESBackend::makeResourceBufferSlots(const ShaderObject& shaderObject, const Shader::BindingSet& slotBindings,Shader::SlotSet& resourceBuffers) {
GLint ssboCount = 0;
- GLint uniformsCount = 0;
+ GLint uniformsCount = 0;
+ const auto& glprogram = shaderObject.glprogram;
- glGetProgramiv(glprogram, GL_ACTIVE_UNIFORMS, &uniformsCount);
-
- for (int i = 0; i < uniformsCount; i++) {
- const GLint NAME_LENGTH = 256;
- GLchar name[NAME_LENGTH];
- GLint length = 0;
- GLint size = 0;
- GLenum type = 0;
- glGetActiveUniform(glprogram, i, NAME_LENGTH, &length, &size, &type, name);
- GLint location = glGetUniformLocation(glprogram, name);
+ for (const auto& uniform : shaderObject.uniforms) {
+ const auto& type = uniform.type;
+ const auto& location = uniform.location;
+ const auto& name = uniform.name;
const GLint INVALID_UNIFORM_LOCATION = -1;
// Try to make sense of the gltype
diff --git a/libraries/gpu/src/gpu/Context.cpp b/libraries/gpu/src/gpu/Context.cpp
index 909ed23669..bb6b27626a 100644
--- a/libraries/gpu/src/gpu/Context.cpp
+++ b/libraries/gpu/src/gpu/Context.cpp
@@ -53,6 +53,13 @@ Context::~Context() {
_batchPool.clear();
}
+void Context::shutdown() {
+ if (_backend) {
+ _backend->shutdown();
+ _backend.reset();
+ }
+}
+
const std::string& Context::getBackendVersion() const {
return _backend->getVersion();
}
diff --git a/libraries/gpu/src/gpu/Context.h b/libraries/gpu/src/gpu/Context.h
index 4683f442e0..4560ea5526 100644
--- a/libraries/gpu/src/gpu/Context.h
+++ b/libraries/gpu/src/gpu/Context.h
@@ -54,6 +54,7 @@ class Backend {
public:
virtual ~Backend(){};
+ virtual void shutdown() {}
virtual const std::string& getVersion() const = 0;
void setStereoState(const StereoState& stereo) { _stereo = stereo; }
@@ -154,6 +155,7 @@ public:
Context();
~Context();
+ void shutdown();
const std::string& getBackendVersion() const;
void beginFrame(const glm::mat4& renderView = glm::mat4(), const glm::mat4& renderPose = glm::mat4());
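// A small sketch of the teardown order implied by the new shutdown() hooks, assuming an
// application-owned gpu::Context (the function and variable names here are illustrative).
// Calling shutdown() before releasing the context lets the GL backend persist its shader
// binary cache via killShaderBinaryCache(), as wired up earlier in this patch.
void shutdownRendering(std::shared_ptr<gpu::Context>& gpuContext) {
    if (gpuContext) {
        gpuContext->shutdown();   // stops the backend and saves any cached shader binaries
        gpuContext.reset();       // safe to drop the context once the backend has shut down
    }
}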
diff --git a/libraries/gpu/src/gpu/Shader.h b/libraries/gpu/src/gpu/Shader.h
index d898411006..fe92da1469 100755
--- a/libraries/gpu/src/gpu/Shader.h
+++ b/libraries/gpu/src/gpu/Shader.h
@@ -54,13 +54,11 @@ public:
struct CompilationLog {
std::string message;
- std::vector<char> binary;
bool compiled{ false };
CompilationLog() {}
CompilationLog(const CompilationLog& src) :
message(src.message),
- binary(src.binary),
compiled(src.compiled) {}
};
using CompilationLogs = std::vector<CompilationLog>;
diff --git a/libraries/input-plugins/src/input-plugins/KeyboardMouseDevice.cpp b/libraries/input-plugins/src/input-plugins/KeyboardMouseDevice.cpp
index 8ecf527a14..650c9675a7 100755
--- a/libraries/input-plugins/src/input-plugins/KeyboardMouseDevice.cpp
+++ b/libraries/input-plugins/src/input-plugins/KeyboardMouseDevice.cpp
@@ -279,7 +279,7 @@ controller::Input KeyboardMouseDevice::InputDevice::makeInput(KeyboardMouseDevic
* moved down. The data value is how far the average position of all touch points moved.
*
*
- * @typedef Controller.Hardware-Keyboard
+ * @typedef {object} Controller.Hardware-Keyboard
* @todo Currently, the mouse wheel in an ordinary mouse generates left/right wheel events instead of up/down.
*/
controller::Input::NamedVector KeyboardMouseDevice::InputDevice::getAvailableInputs() const {
diff --git a/libraries/model-networking/src/model-networking/ModelCache.h b/libraries/model-networking/src/model-networking/ModelCache.h
index 7e911bc9bf..ee13d6666c 100644
--- a/libraries/model-networking/src/model-networking/ModelCache.h
+++ b/libraries/model-networking/src/model-networking/ModelCache.h
@@ -179,7 +179,7 @@ public:
* @function ModelCache.prefetch
* @param {string} url - URL of the resource to prefetch.
* @param {object} [extra=null]
- * @returns {Resource}
+ * @returns {ResourceObject}
*/
/**jsdoc
@@ -188,7 +188,7 @@ public:
* @param {string} url - URL of the resource to load.
* @param {string} [fallback=""] - Fallback URL if load of the desired URL fails.
* @param {} [extra=null]
- * @returns {Resource}
+ * @returns {object}
*/
diff --git a/libraries/model-networking/src/model-networking/TextureCache.h b/libraries/model-networking/src/model-networking/TextureCache.h
index 898f0e3d3a..bca64806c4 100644
--- a/libraries/model-networking/src/model-networking/TextureCache.h
+++ b/libraries/model-networking/src/model-networking/TextureCache.h
@@ -195,7 +195,7 @@ public:
* @function TextureCache.prefetch
* @param {string} url - URL of the resource to prefetch.
* @param {object} [extra=null]
- * @returns {Resource}
+ * @returns {ResourceObject}
*/
/**jsdoc
@@ -204,7 +204,7 @@ public:
* @param {string} url - URL of the resource to load.
* @param {string} [fallback=""] - Fallback URL if load of the desired URL fails.
* @param {} [extra=null]
- * @returns {Resource}
+ * @returns {object}
*/
@@ -261,7 +261,7 @@ protected:
* @param {string} url
* @param {number} type
* @param {number} [maxNumPixels=67108864]
- * @returns {Resource}
+ * @returns {ResourceObject}
*/
// Overload ResourceCache::prefetch to allow specifying texture type for loads
Q_INVOKABLE ScriptableResource* prefetch(const QUrl& url, int type, int maxNumPixels = ABSOLUTE_MAX_TEXTURE_NUM_PIXELS);
diff --git a/libraries/networking/src/AddressManager.h b/libraries/networking/src/AddressManager.h
index 7832b26c96..38eb7ee670 100644
--- a/libraries/networking/src/AddressManager.h
+++ b/libraries/networking/src/AddressManager.h
@@ -138,7 +138,7 @@ public:
*
*
*
- * @typedef location.LookupTrigger
+ * @typedef {number} location.LookupTrigger
*/
enum LookupTrigger {
UserInput,
@@ -184,7 +184,7 @@ public slots:
/**jsdoc
* Go to a specified metaverse address.
* @function location.handleLookupString
- * @param {string} address - The address to go to: a "hifi:/" address, an IP address (e.g.,
+ * @param {string} address - The address to go to: a "hifi://" address, an IP address (e.g.,
 *     "127.0.0.1" or "localhost"), a domain name, a named path on a domain (starts with
 *     "/"), a position or position and orientation, or a user (starts with "@").
 * @param {boolean} fromSuggestions=false - Set to true if the address is obtained from the "Goto" dialog.
diff --git a/libraries/networking/src/DomainHandler.h b/libraries/networking/src/DomainHandler.h
index 08908dbaf6..4d98391104 100644
--- a/libraries/networking/src/DomainHandler.h
+++ b/libraries/networking/src/DomainHandler.h
@@ -137,7 +137,7 @@ public:
*
*
*
- * @typedef Window.ConnectionRefusedReason
+ * @typedef {number} Window.ConnectionRefusedReason
*/
enum class ConnectionRefusedReason : uint8_t {
Unknown,
diff --git a/libraries/networking/src/ResourceCache.cpp b/libraries/networking/src/ResourceCache.cpp
index 28266d0a7f..18e60ef5ef 100644
--- a/libraries/networking/src/ResourceCache.cpp
+++ b/libraries/networking/src/ResourceCache.cpp
@@ -218,8 +218,8 @@ ScriptableResource* ResourceCache::prefetch(const QUrl& url, void* extra) {
}
ResourceCache::ResourceCache(QObject* parent) : QObject(parent) {
- auto nodeList = DependencyManager::get<NodeList>();
- if (nodeList) {
+ if (DependencyManager::isSet<NodeList>()) {
+     auto nodeList = DependencyManager::get<NodeList>();
auto& domainHandler = nodeList->getDomainHandler();
connect(&domainHandler, &DomainHandler::disconnectedFromDomain,
this, &ResourceCache::clearATPAssets, Qt::DirectConnection);
diff --git a/libraries/networking/src/ResourceCache.h b/libraries/networking/src/ResourceCache.h
index 18840cd11e..a4bd352563 100644
--- a/libraries/networking/src/ResourceCache.h
+++ b/libraries/networking/src/ResourceCache.h
@@ -87,7 +87,7 @@ private:
class ScriptableResource : public QObject {
/**jsdoc
- * @constructor Resource
+ * @class ResourceObject
*
* @hifi-interface
* @hifi-client-entity
@@ -97,11 +97,6 @@ class ScriptableResource : public QObject {
* @property {string} url - URL of this resource.
* @property {Resource.State} state - Current loading state.
*/
- /**jsdoc
- * @namespace Resource
- * @variation 0
- * @property {Resource.State} State
- */
Q_OBJECT
Q_PROPERTY(QUrl url READ getURL)
Q_PROPERTY(int state READ getState NOTIFY stateChanged)
@@ -109,8 +104,7 @@ class ScriptableResource : public QObject {
public:
/**jsdoc
- * @name Resource.State
- * @static
+ * @typedef {object} Resource.State
* @property {number} QUEUED - The resource is queued up, waiting to be loaded.
* @property {number} LOADING - The resource is downloading.
* @property {number} LOADED - The resource has finished downloading but is not complete.
@@ -131,7 +125,7 @@ public:
/**jsdoc
* Release this resource.
- * @function Resource#release
+ * @function ResourceObject#release
*/
Q_INVOKABLE void release();
@@ -146,7 +140,7 @@ signals:
/**jsdoc
* Triggered when download progress for this resource has changed.
- * @function Resource#progressChanged
+ * @function ResourceObject#progressChanged
* @param {number} bytesReceived - Bytes downloaded so far.
* @param {number} bytesTotal - Total number of bytes in the resource.
* @returns {Signal}
@@ -155,7 +149,7 @@ signals:
/**jsdoc
* Triggered when resource loading state has changed.
- * @function Resource#stateChanged
+ * @function ResourceObject#stateChanged
* @param {Resource.State} state - New state.
* @returns {Signal}
*/
@@ -262,7 +256,7 @@ protected slots:
* @function ResourceCache.prefetch
* @param {string} url - URL of the resource to prefetch.
* @param {object} [extra=null]
- * @returns {Resource}
+ * @returns {ResourceObject}
*/
// Prefetches a resource to be held by the QScriptEngine.
// Left as a protected member so subclasses can overload prefetch
@@ -275,8 +269,9 @@ protected slots:
* @param {string} url - URL of the resource to load.
* @param {string} [fallback=""] - Fallback URL if load of the desired URL fails.
* @param {} [extra=null]
- * @returns {Resource}
+ * @returns {object}
*/
+ // FIXME: The return type is not recognized by JavaScript.
/// Loads a resource from the specified URL and returns it.
/// If the caller is on a different thread than the ResourceCache,
/// returns an empty smart pointer and loads its asynchronously.
diff --git a/libraries/physics/src/EntityMotionState.cpp b/libraries/physics/src/EntityMotionState.cpp
index bc293b785b..9718c636bc 100644
--- a/libraries/physics/src/EntityMotionState.cpp
+++ b/libraries/physics/src/EntityMotionState.cpp
@@ -242,11 +242,9 @@ void EntityMotionState::getWorldTransform(btTransform& worldTrans) const {
uint32_t thisStep = ObjectMotionState::getWorldSimulationStep();
float dt = (thisStep - _lastKinematicStep) * PHYSICS_ENGINE_FIXED_SUBSTEP;
+ _lastKinematicStep = thisStep;
_entity->stepKinematicMotion(dt);
- // bypass const-ness so we can remember the step
- const_cast(this)->_lastKinematicStep = thisStep;
-
// and set the acceleration-matches-gravity count high so that if we send an update
// it will use the correct acceleration for remote simulations
_accelerationNearlyGravityCount = (uint8_t)(-1);
diff --git a/libraries/physics/src/ObjectMotionState.h b/libraries/physics/src/ObjectMotionState.h
index 24dd655342..bb3c00bd5d 100644
--- a/libraries/physics/src/ObjectMotionState.h
+++ b/libraries/physics/src/ObjectMotionState.h
@@ -184,7 +184,7 @@ protected:
btRigidBody* _body { nullptr };
float _density { 1.0f };
- uint32_t _lastKinematicStep;
+ mutable uint32_t _lastKinematicStep;
bool _hasInternalKinematicChanges { false };
};
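// Sketch of the pattern enabled by marking _lastKinematicStep mutable: a const accessor can
// update the bookkeeping member directly instead of const_cast-ing `this`, as the removed
// EntityMotionState code used to. Class and member names are simplified for illustration.
class KinematicExample {
public:
    void getWorldTransform(/* btTransform& worldTrans */) const {
        uint32_t thisStep = 42;         // stand-in for ObjectMotionState::getWorldSimulationStep()
        _lastKinematicStep = thisStep;  // legal in a const method because the member is mutable
    }
private:
    mutable uint32_t _lastKinematicStep { 0 };
};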
diff --git a/libraries/physics/src/PhysicsEngine.cpp b/libraries/physics/src/PhysicsEngine.cpp
index fc54970585..4811813dd4 100644
--- a/libraries/physics/src/PhysicsEngine.cpp
+++ b/libraries/physics/src/PhysicsEngine.cpp
@@ -71,8 +71,8 @@ void PhysicsEngine::init() {
}
}
-uint32_t PhysicsEngine::getNumSubsteps() {
- return _numSubsteps;
+uint32_t PhysicsEngine::getNumSubsteps() const {
+ return _dynamicsWorld->getNumSubsteps();
}
// private
@@ -331,13 +331,9 @@ void PhysicsEngine::stepSimulation() {
PHYSICS_ENGINE_FIXED_SUBSTEP, onSubStep);
if (numSubsteps > 0) {
BT_PROFILE("postSimulation");
- _numSubsteps += (uint32_t)numSubsteps;
- ObjectMotionState::setWorldSimulationStep(_numSubsteps);
-
if (_myAvatarController) {
_myAvatarController->postSimulation();
}
-
_hasOutgoingChanges = true;
}
diff --git a/libraries/physics/src/PhysicsEngine.h b/libraries/physics/src/PhysicsEngine.h
index 0dfe3a7a7c..2ac195956a 100644
--- a/libraries/physics/src/PhysicsEngine.h
+++ b/libraries/physics/src/PhysicsEngine.h
@@ -52,7 +52,7 @@ public:
~PhysicsEngine();
void init();
- uint32_t getNumSubsteps();
+ uint32_t getNumSubsteps() const;
void removeObjects(const VectorOfMotionStates& objects);
void removeSetOfObjects(const SetOfMotionStates& objects); // only called during teardown
@@ -135,7 +135,6 @@ private:
CharacterController* _myAvatarController;
uint32_t _numContactFrames = 0;
- uint32_t _numSubsteps;
bool _dumpNextStats { false };
bool _saveNextStats { false };
diff --git a/libraries/physics/src/ThreadSafeDynamicsWorld.cpp b/libraries/physics/src/ThreadSafeDynamicsWorld.cpp
index 3f24851dce..07d5ceb9ac 100644
--- a/libraries/physics/src/ThreadSafeDynamicsWorld.cpp
+++ b/libraries/physics/src/ThreadSafeDynamicsWorld.cpp
@@ -59,14 +59,11 @@ int ThreadSafeDynamicsWorld::stepSimulationWithSubstepCallback(btScalar timeStep
}
}
- /*//process some debugging flags
- if (getDebugDrawer()) {
- btIDebugDraw* debugDrawer = getDebugDrawer();
- gDisableDeactivation = (debugDrawer->getDebugMode() & btIDebugDraw::DBG_NoDeactivation) != 0;
- }*/
if (subSteps) {
//clamp the number of substeps, to prevent simulation grinding spiralling down to a halt
int clampedSimulationSteps = (subSteps > maxSubSteps)? maxSubSteps : subSteps;
+ _numSubsteps += clampedSimulationSteps;
+ ObjectMotionState::setWorldSimulationStep(_numSubsteps);
saveKinematicState(fixedTimeStep*clampedSimulationSteps);
@@ -98,28 +95,24 @@ int ThreadSafeDynamicsWorld::stepSimulationWithSubstepCallback(btScalar timeStep
// call this instead of non-virtual btDiscreteDynamicsWorld::synchronizeSingleMotionState()
void ThreadSafeDynamicsWorld::synchronizeMotionState(btRigidBody* body) {
btAssert(body);
- if (body->getMotionState() && !body->isStaticObject()) {
- //we need to call the update at least once, even for sleeping objects
- //otherwise the 'graphics' transform never updates properly
- ///@todo: add 'dirty' flag
- //if (body->getActivationState() != ISLAND_SLEEPING)
- {
- if (body->isKinematicObject()) {
- ObjectMotionState* objectMotionState = static_cast<ObjectMotionState*>(body->getMotionState());
- if (objectMotionState->hasInternalKinematicChanges()) {
- objectMotionState->clearInternalKinematicChanges();
- body->getMotionState()->setWorldTransform(body->getWorldTransform());
- }
- return;
- }
- btTransform interpolatedTransform;
- btTransformUtil::integrateTransform(body->getInterpolationWorldTransform(),
- body->getInterpolationLinearVelocity(),body->getInterpolationAngularVelocity(),
- (m_latencyMotionStateInterpolation && m_fixedTimeStep) ? m_localTime - m_fixedTimeStep : m_localTime*body->getHitFraction(),
- interpolatedTransform);
- body->getMotionState()->setWorldTransform(interpolatedTransform);
+ btAssert(body->getMotionState());
+
+ if (body->isKinematicObject()) {
+ ObjectMotionState* objectMotionState = static_cast<ObjectMotionState*>(body->getMotionState());
+ if (objectMotionState->hasInternalKinematicChanges()) {
+ // this is a special case where the kinematic motion has been updated by an Action
+ // so we supply the body's current transform to the MotionState
+ objectMotionState->clearInternalKinematicChanges();
+ body->getMotionState()->setWorldTransform(body->getWorldTransform());
}
+ return;
}
+ btTransform interpolatedTransform;
+ btTransformUtil::integrateTransform(body->getInterpolationWorldTransform(),
+ body->getInterpolationLinearVelocity(),body->getInterpolationAngularVelocity(),
+ (m_latencyMotionStateInterpolation && m_fixedTimeStep) ? m_localTime - m_fixedTimeStep : m_localTime*body->getHitFraction(),
+ interpolatedTransform);
+ body->getMotionState()->setWorldTransform(interpolatedTransform);
}
void ThreadSafeDynamicsWorld::synchronizeMotionStates() {
@@ -164,24 +157,12 @@ void ThreadSafeDynamicsWorld::synchronizeMotionStates() {
}
void ThreadSafeDynamicsWorld::saveKinematicState(btScalar timeStep) {
-///would like to iterate over m_nonStaticRigidBodies, but unfortunately old API allows
-///to switch status _after_ adding kinematic objects to the world
-///fix it for Bullet 3.x release
DETAILED_PROFILE_RANGE(simulation_physics, "saveKinematicState");
BT_PROFILE("saveKinematicState");
- for (int i=0;i<m_collisionObjects.size();i++)
- {
-     btCollisionObject* colObj = m_collisionObjects[i];
-     btRigidBody* body = btRigidBody::upcast(colObj);
-     if (body && body->getActivationState() != ISLAND_SLEEPING)
- {
- if (body->isKinematicObject())
- {
- //to calculate velocities next frame
- body->saveKinematicState(timeStep);
- }
+ for (int i=0;i<m_nonStaticRigidBodies.size();i++) {
+     btRigidBody* body = m_nonStaticRigidBodies[i];
+     if (body->isKinematicObject() && body->getActivationState() != ISLAND_SLEEPING) {
+ body->saveKinematicState(timeStep);
}
}
}
-
-
diff --git a/libraries/physics/src/ThreadSafeDynamicsWorld.h b/libraries/physics/src/ThreadSafeDynamicsWorld.h
index 54c3ddb756..d8cee4d2de 100644
--- a/libraries/physics/src/ThreadSafeDynamicsWorld.h
+++ b/libraries/physics/src/ThreadSafeDynamicsWorld.h
@@ -37,6 +37,7 @@ public:
btConstraintSolver* constraintSolver,
btCollisionConfiguration* collisionConfiguration);
+ int getNumSubsteps() const { return _numSubsteps; }
int stepSimulationWithSubstepCallback(btScalar timeStep, int maxSubSteps = 1,
btScalar fixedTimeStep = btScalar(1.)/btScalar(60.),
SubStepCallback onSubStep = []() { });
@@ -61,6 +62,7 @@ private:
VectorOfMotionStates _deactivatedStates;
SetOfMotionStates _activeStates;
SetOfMotionStates _lastActiveStates;
+ int _numSubsteps { 0 };
};
#endif // hifi_ThreadSafeDynamicsWorld_h
diff --git a/libraries/script-engine/src/AssetScriptingInterface.h b/libraries/script-engine/src/AssetScriptingInterface.h
index 7f7a3a68b0..72d6901fb5 100644
--- a/libraries/script-engine/src/AssetScriptingInterface.h
+++ b/libraries/script-engine/src/AssetScriptingInterface.h
@@ -121,7 +121,7 @@ public:
/**jsdoc
* A set of properties that can be passed to {@link Assets.getAsset}.
- * @typedef {Object} Assets.GetOptions
+ * @typedef {object} Assets.GetOptions
* @property {string} [url] an "atp:" style URL, hash, or relative mapped path to fetch
* @property {string} [responseType=text] the desired response type (text | arraybuffer | json)
* @property {boolean} [decompress=false] whether to attempt gunzip decompression on the fetched data
@@ -137,7 +137,7 @@ public:
/**jsdoc
* Result value returned by {@link Assets.getAsset}.
- * @typedef {Object} Assets~getAssetResult
+ * @typedef {object} Assets~getAssetResult
* @property {string} [url] the resolved "atp:" style URL for the fetched asset
* @property {string} [hash] the resolved hash for the fetched asset
* @property {string|ArrayBuffer|Object} [response] response data (possibly converted per .responseType value)
@@ -159,7 +159,7 @@ public:
/**jsdoc
* A set of properties that can be passed to {@link Assets.putAsset}.
- * @typedef {Object} Assets.PutOptions
+ * @typedef {object} Assets.PutOptions
* @property {ArrayBuffer|string} [data] byte buffer or string value representing the new asset's content
* @property {string} [path=null] ATP path mapping to automatically create (upon successful upload to hash)
* @property {boolean} [compress=false] whether to gzip compress data before uploading
@@ -174,7 +174,7 @@ public:
/**jsdoc
* Result value returned by {@link Assets.putAsset}.
- * @typedef {Object} Assets~putAssetResult
+ * @typedef {object} Assets~putAssetResult
* @property {string} [url] the resolved "atp:" style URL for the uploaded asset (based on .path if specified, otherwise on the resulting ATP hash)
* @property {string} [path] the uploaded asset's resulting ATP path (or undefined if no path mapping was assigned)
* @property {string} [hash] the uploaded asset's resulting ATP hash
diff --git a/libraries/script-engine/src/Quat.h b/libraries/script-engine/src/Quat.h
index 1ccdfdbf31..76b7ac45e3 100644
--- a/libraries/script-engine/src/Quat.h
+++ b/libraries/script-engine/src/Quat.h
@@ -43,7 +43,7 @@
* @hifi-server-entity
* @hifi-assignment-client
*
- * @property IDENTITY {Quat} { x: 0, y: 0, z: 0, w: 1 }: The identity rotation, i.e., no rotation.
+ * @property {Quat} IDENTITY - { x: 0, y: 0, z: 0, w: 1 }: The identity rotation, i.e., no rotation.
 *     Read-only.
 * @example Print the IDENTITY value.
* print(JSON.stringify(Quat.IDENTITY)); // { x: 0, y: 0, z: 0, w: 1 }
diff --git a/libraries/script-engine/src/SceneScriptingInterface.h b/libraries/script-engine/src/SceneScriptingInterface.h
index fdfbc6f6c0..da42cf2df3 100644
--- a/libraries/script-engine/src/SceneScriptingInterface.h
+++ b/libraries/script-engine/src/SceneScriptingInterface.h
@@ -21,7 +21,7 @@
namespace SceneScripting {
/**jsdoc
- * @typedef Scene.Stage.Location
+ * @typedef {object} Scene.Stage.Location
* @property {number} longitude
* @property {number} latitude
* @property {number} altitude
@@ -49,7 +49,7 @@ namespace SceneScripting {
using LocationPointer = std::unique_ptr;
/**jsdoc
- * @typedef Scene.Stage.Time
+ * @typedef {object} Scene.Stage.Time
* @property {number} hour
* @property {number} day
*/
@@ -73,7 +73,7 @@ namespace SceneScripting {
using TimePointer = std::unique_ptr;
/**jsdoc
- * @typedef Scene.Stage.KeyLight
+ * @typedef {object} Scene.Stage.KeyLight
* @property {Vec3} color
* @property {number} intensity
* @property {number} ambientIntensity
diff --git a/libraries/script-engine/src/ScriptEngine.cpp b/libraries/script-engine/src/ScriptEngine.cpp
index 23ffbabe77..f98bffb739 100644
--- a/libraries/script-engine/src/ScriptEngine.cpp
+++ b/libraries/script-engine/src/ScriptEngine.cpp
@@ -237,6 +237,14 @@ QString ScriptEngine::getContext() const {
return "unknown";
}
+bool ScriptEngine::isDebugMode() const {
+#if defined(DEBUG)
+ return true;
+#else
+ return false;
+#endif
+}
+
ScriptEngine::~ScriptEngine() {
auto scriptEngines = DependencyManager::get<ScriptEngines>();
if (scriptEngines) {
@@ -558,6 +566,16 @@ static void scriptableResourceFromScriptValue(const QScriptValue& value, Scripta
resource = static_cast<ScriptableResource*>(value.toQObject());
}
+/**jsdoc
+ * @namespace Resource
+ *
+ * @hifi-interface
+ * @hifi-client-entity
+ * @hifi-server-entity
+ * @hifi-assignment-client
+ *
+ * @property {Resource.State} State
+ */
static QScriptValue createScriptableResourcePrototype(ScriptEnginePointer engine) {
auto prototype = engine->newObject();
diff --git a/libraries/script-engine/src/ScriptEngine.h b/libraries/script-engine/src/ScriptEngine.h
index 3001666b5d..c02a63ef3c 100644
--- a/libraries/script-engine/src/ScriptEngine.h
+++ b/libraries/script-engine/src/ScriptEngine.h
@@ -232,6 +232,12 @@ public:
*/
Q_INVOKABLE bool isClientScript() const { return _context == CLIENT_SCRIPT; }
+ /**jsdoc
+ * @function Script.isDebugMode
+ * @returns {boolean}
+ */
+ Q_INVOKABLE bool isDebugMode() const;
+
/**jsdoc
* @function Script.isEntityClientScript
* @returns {boolean}
diff --git a/libraries/script-engine/src/ScriptUUID.h b/libraries/script-engine/src/ScriptUUID.h
index 0af0c1cf8e..45e6ec0ad1 100644
--- a/libraries/script-engine/src/ScriptUUID.h
+++ b/libraries/script-engine/src/ScriptUUID.h
@@ -30,7 +30,7 @@
* @hifi-server-entity
* @hifi-assignment-client
*
- * @property NULL {Uuid} The null UUID, {00000000-0000-0000-0000-000000000000}.
+ * @property {Uuid} NULL - The null UUID, {00000000-0000-0000-0000-000000000000}.
*/
/// Scriptable interface for a UUID helper class object. Used exclusively in the JavaScript API
diff --git a/libraries/script-engine/src/Vec3.h b/libraries/script-engine/src/Vec3.h
index eb9438c5c2..696981d1b4 100644
--- a/libraries/script-engine/src/Vec3.h
+++ b/libraries/script-engine/src/Vec3.h
@@ -42,8 +42,7 @@
/**jsdoc
* The Vec3 API provides facilities for generating and manipulating 3-dimensional vectors. High Fidelity uses a right-handed
* Cartesian coordinate system where the y-axis is the "up" and the negative z-axis is the "front" direction.
- *
+ *
*
* @namespace Vec3
* @variation 0
diff --git a/libraries/shared/src/SharedUtil.h b/libraries/shared/src/SharedUtil.h
index 6e00e5c090..9875314aa4 100644
--- a/libraries/shared/src/SharedUtil.h
+++ b/libraries/shared/src/SharedUtil.h
@@ -25,6 +25,7 @@
#include
#include
+#include "NumericalConstants.h"
// When writing out avatarEntities to a QByteArray, if the parentID is the ID of MyAvatar, use this ID instead. This allows
// the value to be reset when the sessionID changes.
const QUuid AVATAR_SELF_ID = QUuid("{00000000-0000-0000-0000-000000000001}");
@@ -122,6 +123,27 @@ const QByteArray HIGH_FIDELITY_USER_AGENT = "Mozilla/5.0 (HighFidelityInterface)
quint64 usecTimestampNow(bool wantDebug = false);
void usecTimestampNowForceClockSkew(qint64 clockSkew);
+inline bool afterUsecs(quint64& startUsecs, quint64 maxIntervalUecs) {
+ auto now = usecTimestampNow();
+ auto interval = now - startUsecs;
+ if (interval > maxIntervalUecs) {
+ startUsecs = now;
+ return true;
+ }
+ return false;
+}
+
+inline bool afterSecs(quint64& startUsecs, quint64 maxIntervalSecs) {
+ return afterUsecs(startUsecs, maxIntervalSecs * USECS_PER_SECOND);
+}
+
+template <typename F>
+void doEvery(quint64& lastReportUsecs, quint64 secs, F lamdba) {
+ if (afterSecs(lastReportUsecs, secs)) {
+ lamdba();
+ }
+}
+
// Number of seconds expressed since the first call to this function, expressed as a float
// Maximum accuracy in msecs
float secTimestampNow();
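// Illustrative only: how the relocated timing helpers above are meant to be used outside of
// the test framework. The function name and the 2-second interval are assumptions made for
// the sake of the example.
void maybeLogStatus() {
    static quint64 lastReportUsecs { 0 };
    // doEvery() runs the lambda at most once per interval; afterSecs()/afterUsecs() reset the
    // stored timestamp and return true whenever the interval has elapsed.
    doEvery(lastReportUsecs, 2, [] {
        // ... emit a periodic status report here ...
    });
}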
diff --git a/libraries/shared/src/shared/Camera.h b/libraries/shared/src/shared/Camera.h
index 32e753d0f9..d14489b92c 100644
--- a/libraries/shared/src/shared/Camera.h
+++ b/libraries/shared/src/shared/Camera.h
@@ -44,13 +44,14 @@ class Camera : public QObject {
* @hifi-interface
* @hifi-client-entity
*
- * @property position {Vec3} The position of the camera. You can set this value only when the camera is in independent mode.
- * @property orientation {Quat} The orientation of the camera. You can set this value only when the camera is in independent
+ * @property {Vec3} position - The position of the camera. You can set this value only when the camera is in independent
* mode.
- * @property mode {Camera.Mode} The camera mode.
- * @property frustum {ViewFrustum} The camera frustum.
- * @property cameraEntity {Uuid} The ID of the entity that is used for the camera position and orientation when the camera
- * is in entity mode.
+ * @property {Quat} orientation - The orientation of the camera. You can set this value only when the camera is in
+ * independent mode.
+ * @property {Camera.Mode} mode - The camera mode.
+ * @property {ViewFrustum} frustum - The camera frustum.
+ * @property {Uuid} cameraEntity - The ID of the entity that is used for the camera position and orientation when the
+ * camera is in entity mode.
*/
// FIXME: The cameraEntity property definition is copied from FancyCamera.h.
Q_PROPERTY(glm::vec3 position READ getPosition WRITE setPosition)
diff --git a/libraries/test-utils/CMakeLists.txt b/libraries/test-utils/CMakeLists.txt
index 2c23e96c1e..7c8ae7a37d 100644
--- a/libraries/test-utils/CMakeLists.txt
+++ b/libraries/test-utils/CMakeLists.txt
@@ -1,3 +1,3 @@
set(TARGET_NAME test-utils)
setup_hifi_library(Network Gui)
-
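+# QTestExtensions.h now calls afterSecs()/usecTimestampNow() from SharedUtil.h,
+# so test-utils needs to link against the shared library.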
+link_hifi_libraries(shared)
diff --git a/tests/GLMTestUtils.h b/libraries/test-utils/src/test-utils/GLMTestUtils.h
similarity index 100%
rename from tests/GLMTestUtils.h
rename to libraries/test-utils/src/test-utils/GLMTestUtils.h
diff --git a/tests/QTestExtensions.h b/libraries/test-utils/src/test-utils/QTestExtensions.h
similarity index 96%
rename from tests/QTestExtensions.h
rename to libraries/test-utils/src/test-utils/QTestExtensions.h
index c9235eeedb..f618f94e5b 100644
--- a/tests/QTestExtensions.h
+++ b/libraries/test-utils/src/test-utils/QTestExtensions.h
@@ -313,27 +313,6 @@ inline QString getTestResource(const QString& relativePath) {
return QDir::cleanPath(dir.absoluteFilePath(relativePath));
}
-inline bool afterUsecs(quint64& startUsecs, quint64 maxIntervalUecs) {
- auto now = usecTimestampNow();
- auto interval = now - startUsecs;
- if (interval > maxIntervalUecs) {
- startUsecs = now;
- return true;
- }
- return false;
-}
-
-inline bool afterSecs(quint64& startUsecs, quint64 maxIntervalSecs) {
- return afterUsecs(startUsecs, maxIntervalSecs * USECS_PER_SECOND);
-}
-
-template <typename F>
-void doEvery(quint64& lastReportUsecs, quint64 secs, F lamdba) {
- if (afterSecs(lastReportUsecs, secs)) {
- lamdba();
- }
-}
-
inline void failAfter(quint64 startUsecs, quint64 secs, const char* message) {
if (afterSecs(startUsecs, secs)) {
QFAIL(message);
diff --git a/libraries/ui/src/QmlFragmentClass.h b/libraries/ui/src/QmlFragmentClass.h
index 87c18a49ad..8a8d0e1732 100644
--- a/libraries/ui/src/QmlFragmentClass.h
+++ b/libraries/ui/src/QmlFragmentClass.h
@@ -20,7 +20,7 @@ public:
/**jsdoc
* Creates a new button, adds it to this and returns it.
* @function QmlFragmentClass#addButton
- * @param properties {Object} button properties
+ * @param properties {object} button properties
* @returns {TabletButtonProxy}
*/
Q_INVOKABLE QObject* addButton(const QVariant& properties);
diff --git a/libraries/ui/src/ui/TabletScriptingInterface.h b/libraries/ui/src/ui/TabletScriptingInterface.h
index f30c6de75e..43d889f1d1 100644
--- a/libraries/ui/src/ui/TabletScriptingInterface.h
+++ b/libraries/ui/src/ui/TabletScriptingInterface.h
@@ -493,7 +493,7 @@ protected:
int _stableOrder;
/**jsdoc
- * @typedef TabletButtonProxy.ButtonProperties
+ * @typedef {object} TabletButtonProxy.ButtonProperties
* @property {string} icon - URL to button icon. (50 x 50)
* @property {string} hoverIcon - URL to button icon, displayed during mouse hover. (50 x 50)
* @property {string} activeHoverIcon - URL to button icon used when button is active, and during mouse hover. (50 x 50)
diff --git a/plugins/oculus/src/OculusControllerManager.cpp b/plugins/oculus/src/OculusControllerManager.cpp
index b8761feed6..7a176d36fb 100644
--- a/plugins/oculus/src/OculusControllerManager.cpp
+++ b/plugins/oculus/src/OculusControllerManager.cpp
@@ -442,7 +442,7 @@ void OculusControllerManager::TouchDevice::stopHapticPulse(bool leftHand) {
* RightHand
number {@link Pose} right hand pose.
*
*
- * @typedef Controller.Hardware-OculusTouch
+ * @typedef {object} Controller.Hardware-OculusTouch
*/
controller::Input::NamedVector OculusControllerManager::TouchDevice::getAvailableInputs() const {
using namespace controller;
diff --git a/plugins/openvr/src/OpenVrDisplayPlugin.cpp b/plugins/openvr/src/OpenVrDisplayPlugin.cpp
index 5a7417cb49..5e4079cbcf 100644
--- a/plugins/openvr/src/OpenVrDisplayPlugin.cpp
+++ b/plugins/openvr/src/OpenVrDisplayPlugin.cpp
@@ -198,9 +198,9 @@ public:
std::string fsSource = HMD_REPROJECTION_FRAG;
GLuint vertexShader { 0 }, fragmentShader { 0 };
std::string error;
- std::vector binary;
- ::gl::compileShader(GL_VERTEX_SHADER, vsSource, "", vertexShader, error);
- ::gl::compileShader(GL_FRAGMENT_SHADER, fsSource, "", fragmentShader, error);
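+ // compileShader() is called here without the separate defines argument, and the linked
+ // program's binary is captured in a gl::CachedShader rather than a plain std::vector.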
+ ::gl::CachedShader binary;
+ ::gl::compileShader(GL_VERTEX_SHADER, vsSource, vertexShader, error);
+ ::gl::compileShader(GL_FRAGMENT_SHADER, fsSource, fragmentShader, error);
_program = ::gl::compileProgram({ { vertexShader, fragmentShader } }, error, binary);
glDeleteShader(vertexShader);
glDeleteShader(fragmentShader);
diff --git a/plugins/openvr/src/ViveControllerManager.cpp b/plugins/openvr/src/ViveControllerManager.cpp
index 96888a6e8b..635d9d0529 100644
--- a/plugins/openvr/src/ViveControllerManager.cpp
+++ b/plugins/openvr/src/ViveControllerManager.cpp
@@ -1297,7 +1297,7 @@ void ViveControllerManager::InputDevice::setConfigFromString(const QString& valu
* TrackedObject15
number {@link Pose} Tracker 15 pose.
*
*
- * @typedef Controller.Hardware-Vive
+ * @typedef {object} Controller.Hardware-Vive
*/
controller::Input::NamedVector ViveControllerManager::InputDevice::getAvailableInputs() const {
using namespace controller;
diff --git a/scripts/+android/defaultScripts.js b/scripts/+android/defaultScripts.js
index 98fbb4b1a7..8950af808d 100644
--- a/scripts/+android/defaultScripts.js
+++ b/scripts/+android/defaultScripts.js
@@ -16,8 +16,7 @@ var DEFAULT_SCRIPTS_COMBINED = [
"system/+android/touchscreenvirtualpad.js",
"system/+android/actionbar.js",
"system/+android/audio.js" ,
- "system/+android/modes.js",
- "system/+android/stats.js"/*,
+ "system/+android/modes.js"/*,
"system/away.js",
"system/controllers/controllerDisplayManager.js",
"system/controllers/handControllerGrabAndroid.js",
@@ -33,6 +32,10 @@ var DEFAULT_SCRIPTS_COMBINED = [
"developer/debugging/debugAndroidMouse.js"*/
];
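+
+// Scripts that are only loaded when the script engine reports debug mode
+// (see the Script.isDebugMode() checks below).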
+var DEBUG_SCRIPTS = [
+ "system/+android/stats.js"
+];
+
var DEFAULT_SCRIPTS_SEPARATE = [ ];
// add a menu item for debugging
@@ -70,6 +73,11 @@ function runDefaultsTogether() {
for (var i in DEFAULT_SCRIPTS_COMBINED) {
Script.include(DEFAULT_SCRIPTS_COMBINED[i]);
}
+ if (Script.isDebugMode()) {
+ for (var i in DEBUG_SCRIPTS) {
+ Script.include(DEBUG_SCRIPTS[i]);
+ }
+ }
loadSeparateDefaults();
}
@@ -77,6 +85,11 @@ function runDefaultsSeparately() {
for (var i in DEFAULT_SCRIPTS_COMBINED) {
Script.load(DEFAULT_SCRIPTS_COMBINED[i]);
}
+ if (Script.isDebugMode()) {
+ for (var i in DEBUG_SCRIPTS) {
+ Script.load(DEBUG_SCRIPTS[i]);
+ }
+ }
loadSeparateDefaults();
}
diff --git a/scripts/system/+android/stats.js b/scripts/system/+android/stats.js
index a93bcb5794..0731684291 100644
--- a/scripts/system/+android/stats.js
+++ b/scripts/system/+android/stats.js
@@ -30,7 +30,7 @@ function init() {
text: "STATS"
});
statsButton.clicked.connect(function() {
- Menu.triggerOption("Stats");
+ Menu.triggerOption("Show Statistics");
});
}
diff --git a/scripts/system/controllers/toggleAdvancedMovementForHandControllers.js b/scripts/system/controllers/toggleAdvancedMovementForHandControllers.js
index a1b96ac607..2f3a1d9628 100644
--- a/scripts/system/controllers/toggleAdvancedMovementForHandControllers.js
+++ b/scripts/system/controllers/toggleAdvancedMovementForHandControllers.js
@@ -171,4 +171,25 @@
Messages.subscribe(HIFI_ADVANCED_MOVEMENT_DISABLER_CHANNEL);
Messages.messageReceived.connect(handleMessage);
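+
+    // On startup, sync the driving and flying controller mappings with the avatar's
+    // current movement settings when running in HMD with Vive or Oculus Touch controllers.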
+    function initializeControls() {
+        if (HMD.active) {
+            if (Controller.Hardware.Vive !== undefined || Controller.Hardware.OculusTouch !== undefined) {
+                if (MyAvatar.useAdvancedMovementControls) {
+                    Controller.disableMapping(DRIVING_MAPPING_NAME);
+                } else {
+                    Controller.enableMapping(DRIVING_MAPPING_NAME);
+                }
+
+                if (MyAvatar.getFlyingEnabled()) {
+                    Controller.disableMapping(FLYING_MAPPING_NAME);
+                } else {
+                    Controller.enableMapping(FLYING_MAPPING_NAME);
+                }
+            }
+        }
+    }
+
+    initializeControls();
+
}()); // END LOCAL_SCOPE
diff --git a/tests-manual/shaders/src/main.cpp b/tests-manual/shaders/src/main.cpp
index cce5b0678b..67cb58182c 100644
--- a/tests-manual/shaders/src/main.cpp
+++ b/tests-manual/shaders/src/main.cpp
@@ -134,12 +134,12 @@ const std::string PIXEL_SHADER_DEFINES{ R"GLSL(
void testShaderBuild(const std::string& vs_src, const std::string& fs_src) {
std::string error;
- std::vector binary;
GLuint vs, fs;
- if (!gl::compileShader(GL_VERTEX_SHADER, vs_src, VERTEX_SHADER_DEFINES, vs, error) ||
- !gl::compileShader(GL_FRAGMENT_SHADER, fs_src, PIXEL_SHADER_DEFINES, fs, error)) {
+ if (!gl::compileShader(GL_VERTEX_SHADER, VERTEX_SHADER_DEFINES + vs_src, vs, error) ||
+ !gl::compileShader(GL_FRAGMENT_SHADER, PIXEL_SHADER_DEFINES + fs_src, fs, error)) {
throw std::runtime_error("Failed to compile shader");
}
+ gl::CachedShader binary;
auto pr = gl::compileProgram({ vs, fs }, error, binary);
if (!pr) {
throw std::runtime_error("Failed to link shader");
diff --git a/tests/animation/CMakeLists.txt b/tests/animation/CMakeLists.txt
index 40f76ee362..17999c4d8e 100644
--- a/tests/animation/CMakeLists.txt
+++ b/tests/animation/CMakeLists.txt
@@ -1,7 +1,7 @@
# Declare dependencies
macro (setup_testcase_dependencies)
# link in the shared libraries
- link_hifi_libraries(shared animation gpu fbx graphics networking)
+ link_hifi_libraries(shared animation gpu fbx graphics networking test-utils)
package_libraries_for_deployment()
endmacro ()
diff --git a/tests/animation/src/AnimInverseKinematicsTests.cpp b/tests/animation/src/AnimInverseKinematicsTests.cpp
index eba74726bb..f5d3597f56 100644
--- a/tests/animation/src/AnimInverseKinematicsTests.cpp
+++ b/tests/animation/src/AnimInverseKinematicsTests.cpp
@@ -16,7 +16,7 @@
#include
#include
-#include "../QTestExtensions.h"
+#include <test-utils/QTestExtensions.h>
QTEST_MAIN(AnimInverseKinematicsTests)
diff --git a/tests/animation/src/AnimTests.cpp b/tests/animation/src/AnimTests.cpp
index d758cc7a27..01c8d1c1b6 100644
--- a/tests/animation/src/AnimTests.cpp
+++ b/tests/animation/src/AnimTests.cpp
@@ -20,7 +20,7 @@
#include
#include
#include
-#include <../QTestExtensions.h>
+#include <test-utils/QTestExtensions.h>
QTEST_MAIN(AnimTests)
diff --git a/tests/animation/src/RotationConstraintTests.cpp b/tests/animation/src/RotationConstraintTests.cpp
index f828201a81..f8f94e8bee 100644
--- a/tests/animation/src/RotationConstraintTests.cpp
+++ b/tests/animation/src/RotationConstraintTests.cpp
@@ -17,7 +17,7 @@
#include
#include
-#include "../QTestExtensions.h"
+#include <test-utils/QTestExtensions.h>
QTEST_MAIN(RotationConstraintTests)
diff --git a/tests/gpu/src/ShaderLoadTest.cpp b/tests/gpu/src/ShaderLoadTest.cpp
new file mode 100644
index 0000000000..09752dc385
--- /dev/null
+++ b/tests/gpu/src/ShaderLoadTest.cpp
@@ -0,0 +1,287 @@
+//
+// Created by Bradley Austin Davis on 2018/01/11
+// Copyright 2014 High Fidelity, Inc.
+//
+// Distributed under the Apache License, Version 2.0.
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#include "ShaderLoadTest.h"
+
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+QTEST_MAIN(ShaderLoadTest)
+
+extern std::atomic gpuBinaryShadersLoaded;
+
+extern const QString& getShaderCacheFile();
+
+
+QtMessageHandler originalHandler;
+
+void messageHandler(QtMsgType type, const QMessageLogContext& context, const QString& message) {
+#if defined(Q_OS_WIN)
+ OutputDebugStringA(message.toStdString().c_str());
+ OutputDebugStringA("\n");
+#endif
+ originalHandler(type, context, message);
+}
+
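+// Parses a cached shader entry's concatenated source. The expected layout (as written to
+// the cache and visible in cache.json) is a "// VERSION <n>" header followed by one or
+// more "//-------- <type>" sections, where <type> is "vertex" or "pixel".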
+std::pair>> parseCachedShaderString(const QString& cachedShaderString) {
+
+ std::pair>> result;
+ {
+ static const QRegularExpression versionRegex("^// VERSION (\\d+)");
+ auto match = versionRegex.match(cachedShaderString);
+ result.first = match.captured(1).toInt();
+ }
+
+
+ int rangeStart = 0;
+ QString type;
+ static const QRegularExpression regex("//-------- (\\w+)");
+ auto match = regex.match(cachedShaderString, rangeStart);
+ while (match.hasMatch()) {
+ auto newType = match.captured(1);
+ auto start = match.capturedStart(0);
+ auto end = match.capturedEnd(0);
+ if (rangeStart != 0) {
+ QString subString = cachedShaderString.mid(rangeStart, start - rangeStart);
+ result.second.emplace_back(type, subString);
+ }
+ rangeStart = end;
+ type = newType;
+ match = regex.match(cachedShaderString, rangeStart);
+ }
+
+ if (rangeStart != 0) {
+ QString subString = cachedShaderString.mid(rangeStart);
+ result.second.emplace_back(type, subString);
+ }
+ return result;
+}
+
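+// Derives a stable name for a shader: prefer the "// <name>.vert" / "// <name>.frag"
+// header comment if present, otherwise fall back to an MD5 hash of the source.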
+std::string getShaderName(const QString& shader) {
+ static const QRegularExpression nameExp("//\\s+(\\w+\\.(?:vert|frag))");
+ auto match = nameExp.match(shader);
+ if (!match.hasMatch()) {
+ return (QCryptographicHash::hash(shader.toUtf8(), QCryptographicHash::Md5).toHex() + ".shader").toStdString();
+ }
+ return match.captured(1).trimmed().toStdString();
+}
+
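+// Appends a unique comment to every stored shader source so that the first compile pass
+// cannot be satisfied by the GPU backend's cache or the driver's binary cache.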
+void ShaderLoadTest::randomizeShaderSources() {
+ for (auto& entry : _shaderSources) {
+ entry.second += ("\n//" + QUuid::createUuid().toString()).toStdString();
+ }
+}
+
+#if USE_LOCAL_SHADERS
+const QString SHADER_CACHE_FILENAME = "c:/Users/bdavi/AppData/Local/High Fidelity - dev/Interface/shaders/cache.json";
+static const QString SHADER_FOLDER = "D:/shaders/";
+void ShaderLoadTest::parseCacheDirectory() {
+ for (const auto& shaderFile : QDir(SHADER_FOLDER).entryList(QDir::Files)) {
+ QString shaderSource = FileUtils::readFile(SHADER_FOLDER + "/" + shaderFile);
+ _shaderSources[shaderFile.trimmed().toStdString()] = shaderSource.toStdString();
+ }
+
+ auto programsDoc = QJsonDocument::fromJson(FileUtils::readFile(SHADER_FOLDER + "programs.json").toUtf8());
+ for (const auto& programElement : programsDoc.array()) {
+ auto programObj = programElement.toObject();
+ QString vertexSource = programObj["vertex"].toString();
+ QString pixelSource = programObj["pixel"].toString();
+ _programs.insert({ vertexSource.toStdString(), pixelSource.toStdString() });
+ }
+}
+
+void ShaderLoadTest::persistCacheDirectory() {
+ for (const auto& shaderFile : QDir(SHADER_FOLDER).entryList(QDir::Files)) {
+ QFile(SHADER_FOLDER + "/" + shaderFile).remove();
+ }
+
+ // Write the shader source files
+ for (const auto& entry : _shaderSources) {
+ const QString name = entry.first.c_str();
+ const QString shader = entry.second.c_str();
+ QString fullFile = SHADER_FOLDER + name;
+ QVERIFY(!QFileInfo(fullFile).exists());
+ QFile shaderFile(fullFile);
+ shaderFile.open(QIODevice::WriteOnly);
+ shaderFile.write(shader.toUtf8());
+ shaderFile.close();
+ }
+
+ // Write the list of programs
+ {
+ QVariantList programsList;
+ for (const auto& program : _programs) {
+ QVariantMap programMap;
+ programMap["vertex"] = program.first.c_str();
+ programMap["pixel"] = program.second.c_str();
+ programsList.push_back(programMap);
+ }
+
+ QFile saveFile(SHADER_FOLDER + "programs.json");
+ saveFile.open(QFile::WriteOnly | QFile::Text | QFile::Truncate);
+ saveFile.write(QJsonDocument::fromVariant(programsList).toJson(QJsonDocument::Indented));
+ saveFile.close();
+ }
+}
+#else
+const QString SHADER_CACHE_FILENAME = ":cache.json";
+#endif
+
+void ShaderLoadTest::parseCacheFile() {
+ QString json = FileUtils::readFile(SHADER_CACHE_FILENAME);
+ auto root = QJsonDocument::fromJson(json.toUtf8()).object();
+ _programs.clear();
+ _programs.reserve(root.size());
+
+ const auto keys = root.keys();
+ Program program;
+ for (auto shaderKey : keys) {
+ auto cacheEntry = root[shaderKey].toObject();
+ auto source = cacheEntry["source"].toString();
+ auto shaders = parseCachedShaderString(source);
+ for (const auto& entry : shaders.second) {
+ const auto& type = entry.first;
+ const auto& source = entry.second;
+ const auto name = getShaderName(source);
+ if (name.empty()) {
+ continue;
+ }
+ if (0 == _shaderSources.count(name)) {
+ _shaderSources[name] = source.toStdString();
+ }
+ if (type == "vertex") {
+ program.first = name;
+ } else if (type == "pixel") {
+ program.second = name;
+ }
+ }
+ // FIXME support geometry / tesselation shaders eventually
+ if (program.first.empty() || program.second.empty()) {
+ qFatal("Bad Shader Setup");
+ }
+ _programs.insert(program);
+ }
+}
+
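+// Creates vertex and pixel gpu::Shader objects from the stored sources, links them into a
+// program, and reports whether the GL backend accepted it.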
+bool ShaderLoadTest::buildProgram(const Program& programFiles) {
+ const auto& vertexName = programFiles.first;
+ const auto& vertexSource = _shaderSources[vertexName];
+ auto vertexShader = gpu::Shader::createVertex({ vertexSource });
+
+ const auto& pixelName = programFiles.second;
+ const auto& pixelSource = _shaderSources[pixelName];
+ auto pixelShader = gpu::Shader::createPixel({ pixelSource });
+
+ auto program = gpu::Shader::createProgram(vertexShader, pixelShader);
+ return gpu::gl::GLBackend::makeProgram(*program, {}, {});
+}
+
+void ShaderLoadTest::initTestCase() {
+ originalHandler = qInstallMessageHandler(messageHandler);
+ DependencyManager::set();
+ {
+ const auto& shaderCacheFile = getShaderCacheFile();
+ if (QFileInfo(shaderCacheFile).exists()) {
+ QFile(shaderCacheFile).remove();
+ }
+ }
+
+ // For local debugging
+#if USE_LOCAL_SHADERS
+ parseCacheFile();
+ persistCacheDirectory();
+ parseCacheDirectory();
+#else
+ parseCacheFile();
+#endif
+
+ // We use this to defeat shader caching both by the GPU backend
+ // and the OpenGL driver
+ randomizeShaderSources();
+
+ QVERIFY(!_programs.empty());
+ for (const auto& program : _programs) {
+ QVERIFY(_shaderSources.count(program.first) == 1);
+ QVERIFY(_shaderSources.count(program.second) == 1);
+ }
+
+ getDefaultOpenGLSurfaceFormat();
+ _canvas.create();
+ if (!_canvas.makeCurrent()) {
+ qFatal("Unable to make test GL context current");
+ }
+ gl::initModuleGl();
+ gpu::Context::init();
+ _canvas.makeCurrent();
+}
+
+void ShaderLoadTest::cleanupTestCase() {
+ DependencyManager::destroy();
+}
+
+void ShaderLoadTest::testShaderLoad() {
+ auto gpuContext = std::make_shared<gpu::Context>();
+ QVERIFY(gpuBinaryShadersLoaded == 0);
+
+ QElapsedTimer timer;
+
+ // Initial load of all the shaders
+ // No caching
+ {
+ timer.start();
+ for (const auto& program : _programs) {
+ QVERIFY(buildProgram(program));
+ }
+ qDebug() << "Uncached shader load took" << timer.elapsed() << "ms";
+ QVERIFY(gpuBinaryShadersLoaded == 0);
+ }
+ gpuContext->recycle();
+ glFinish();
+
+ // Reload the shaders within the same GPU context lifetime.
+ // Shaders will use the cached binaries in memory
+ {
+ timer.start();
+ for (const auto& program : _programs) {
+ QVERIFY(buildProgram(program));
+ }
+ qDebug() << "Cached shader load took" << timer.elapsed() << "ms";
+ QVERIFY(gpuBinaryShadersLoaded == _programs.size() * gpu::gl::GLShader::NumVersions);
+ }
+
+ // Simulate reloading the shader cache from disk by destroying and recreating the gpu context
+ // Shaders will use the cached binaries from disk
+ {
+ gpuBinaryShadersLoaded = 0;
+ gpuContext->recycle();
+ gpuContext->shutdown();
+ gpuContext.reset();
+ gpuContext = std::make_shared<gpu::Context>();
+ _canvas.makeCurrent();
+ timer.start();
+ for (const auto& program : _programs) {
+ QVERIFY(buildProgram(program));
+ }
+ qDebug() << "Cached shader load took" << timer.elapsed() << "ms";
+ QVERIFY(gpuBinaryShadersLoaded == _programs.size() * gpu::gl::GLShader::NumVersions);
+ }
+
+}
+
diff --git a/tests/gpu/src/ShaderLoadTest.h b/tests/gpu/src/ShaderLoadTest.h
new file mode 100644
index 0000000000..cfb01501b2
--- /dev/null
+++ b/tests/gpu/src/ShaderLoadTest.h
@@ -0,0 +1,63 @@
+//
+// Created by Bradley Austin Davis on 2018/05/08
+// Copyright 2013-2018 High Fidelity, Inc.
+//
+// Distributed under the Apache License, Version 2.0.
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#pragma once
+
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+
+#define USE_LOCAL_SHADERS 0
+
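+// Program is a pair of shader-source names used as the key of an unordered_set, so a
+// hash for std::pair is provided by combining the hashes of both strings.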
+namespace std {
+    template <>
+    struct hash<std::pair<std::string, std::string>> {
+        size_t operator()(const std::pair<std::string, std::string>& a) const {
+            std::hash<std::string> hasher;
+            return hasher(a.first) + hasher(a.second);
+        }
+    };
+
+}
+
+using ShadersByName = std::unordered_map<std::string, std::string>;
+using Program = std::pair<std::string, std::string>;
+using Programs = std::unordered_set<Program>;
+
+class ShaderLoadTest : public QObject {
+ Q_OBJECT
+
+private:
+
+ void parseCacheFile();
+#if USE_LOCAL_SHADERS
+ void parseCacheDirectory();
+ void persistCacheDirectory();
+#endif
+ bool buildProgram(const Program& program);
+ void randomizeShaderSources();
+
+private slots:
+ void initTestCase();
+ void cleanupTestCase();
+ void testShaderLoad();
+
+
+private:
+
+ ShadersByName _shaderSources;
+ Programs _programs;
+ QString _resourcesPath;
+ OffscreenGLCanvas _canvas;
+ const glm::uvec2 _size{ 640, 480 };
+};
diff --git a/tests/gpu/src/TextureTest.cpp b/tests/gpu/src/TextureTest.cpp
index c9229fb826..72fe1bfbfe 100644
--- a/tests/gpu/src/TextureTest.cpp
+++ b/tests/gpu/src/TextureTest.cpp
@@ -21,7 +21,7 @@
#include
#include
-#include "../../QTestExtensions.h"
+#include <test-utils/QTestExtensions.h>
QTEST_MAIN(TextureTest)
diff --git a/tests/gpu/src/cache.json b/tests/gpu/src/cache.json
new file mode 100644
index 0000000000..ccec6a430f
--- /dev/null
+++ b/tests/gpu/src/cache.json
@@ -0,0 +1,512 @@
+{
+ "+Fz1iUM9mAnsczReJey3+g==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// simple.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 9/15/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\n// the interpolated normal\nout vec3 _normalWS;\nout vec3 _normalMS;\nout vec4 _color;\nout vec2 _texCoord0;\nout vec4 _positionMS;\nout vec4 _positionES;\n\nvoid main(void) {\n _color = color_sRGBAToLinear(inColor);\n _texCoord0 = inTexCoord0.st;\n _positionMS = inPosition;\n _normalMS = inNormal.xyz;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = 
obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// simple_transparent_textured_unlit.frag\n// fragment shader\n//\n// Created by Sam Gateau on 4/3/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// the albedo texture\nuniform sampler2D originalTexture;\n\nin vec4 _color;\nin vec2 _texCoord0;\n\nlayout(location = 0) out vec4 _fragColor0;\n\nvoid main(void) {\n vec4 texel = texture(originalTexture, _texCoord0.st);\n float colorAlpha = _color.a;\n if (_color.a <= 0.0) {\n texel = color_sRGBAToLinear(texel);\n colorAlpha = -_color.a;\n }\n _fragColor0 = vec4(_color.rgb * texel.rgb, colorAlpha * texel.a);\n}\n\n"
+ },
+ "+M1tIpq5A7dS3nFg+gZJNQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// deferred_light.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/16/16.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec2 _texCoord0;\n\nuniform vec4 texcoordFrameTransform;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n _texCoord0 = (pos.xy + 1.0) * 0.5;\n\n _texCoord0 *= texcoordFrameTransform.zw;\n _texCoord0 += texcoordFrameTransform.xy;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// directional_ambient_light.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 9/3/14.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * 
FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\n// the albedo texture\nuniform sampler2D albedoMap;\n\n// the normal texture\nuniform sampler2D normalMap;\n\n// the specular texture\nuniform sampler2D specularMap;\n\n// the depth texture\nuniform sampler2D depthMap;\nuniform sampler2D linearZeyeMap;\n\n// the obscurance texture\nuniform sampler2D obscuranceMap;\n\n// the lighting texture\nuniform sampler2D lightingMap;\n\n\nstruct DeferredFragment {\n vec4 position;\n vec3 normal;\n float metallic;\n vec3 albedo;\n float obscurance;\n vec3 fresnel;\n float roughness;\n int mode;\n float scattering;\n float depthVal;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nDeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n vec4 specularVal;\n \n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n specularVal = texture(specularMap, texcoord);\n frag.obscurance = texture(obscuranceMap, texcoord).x;\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n frag.obscurance = min(specularVal.w, frag.obscurance);\n\n if (frag.mode == FRAG_MODE_SCATTERING) {\n frag.scattering = specularVal.x;\n }\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nDeferredFragment unpackDeferredFragmentNoPositionNoAmbient(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n\n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n //frag.emissive = specularVal.xyz;\n frag.obscurance = 1.0;\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 _prevView;\n mat4 
_prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? 
ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n\n\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\nvec4 unpackDeferredPosition(float depthValue, vec2 texcoord) {\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n\n return vec4(evalEyePositionFromZdb(side, depthValue, texcoord), 1.0);\n}\n\n// This method to unpack position is fastesst\nvec4 unpackDeferredPositionFromZdb(vec2 texcoord) {\n float Zdb = texture(depthMap, texcoord).x;\n\treturn unpackDeferredPosition(Zdb, texcoord);\n}\n\nvec4 unpackDeferredPositionFromZeye(vec2 texcoord) {\n float Zeye = -texture(linearZeyeMap, texcoord).x;\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0);\n}\n\nDeferredFragment unpackDeferredFragment(DeferredFrameTransform deferredTransform, vec2 texcoord) {\n\n float depthValue = texture(depthMap, texcoord).r;\n\n DeferredFragment frag = unpackDeferredFragmentNoPosition(texcoord);\n\n frag.depthVal = depthValue;\n frag.position = unpackDeferredPosition(frag.depthVal, texcoord);\n\n return frag;\n}\n\n\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat 
lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, 
lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n 
return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n\n\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular 
Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n\n\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 evalLightmappedColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 normal, vec3 albedo, vec3 lightmap) {\n Light light = getKeyLight();\n LightAmbient ambient = getLightAmbient();\n\n\n\n // Catch normals perpendicular to the projection plane, hence the magic number for the threshold\n // It should be just 0, but we have inaccuracy so we overshoot\n const float PERPENDICULAR_THRESHOLD = -0.005;\n vec3 fragNormal = vec3(invViewMat * vec4(normal, 0.0)); // transform to worldspace\n float diffuseDot = dot(fragNormal, -getLightDirection(light));\n float facingLight = step(PERPENDICULAR_THRESHOLD, diffuseDot); \n\n // Reevaluate the shadow attenuation for light facing fragments\n float lightAttenuation = (1.0 - facingLight) + facingLight * shadowAttenuation;\n\n // Diffuse light is the lightmap dimmed by shadow\n vec3 diffuseLight = lightAttenuation * lightmap;\n\n // Ambient light is the lightmap when in shadow\n vec3 ambientLight = (1.0 - lightAttenuation) * lightmap * getLightAmbientIntensity(ambient);\n\n return isLightmapEnabled() * obscurance * albedo * (diffuseLight + ambientLight);\n}\n\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n 
float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return specularLight;\n}\n\n\nfloat curvatureAO(in float k) {\n return 1.0f - (0.0022f * k * k) + (0.0776f * k) + 0.7369f;\n}\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientSpaceLowNormal = (ambient.transform * vec4(lowNormalCurvature.xyz, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nif (scattering * isScatteringEnabled() > 0.0) {\n float ambientOcclusion = curvatureAO(lowNormalCurvature.w * 20.0f) * 0.5f;\n float ambientOcclusionHF = curvatureAO(midNormalCurvature.w * 8.0f) * 0.5f;\n ambientOcclusion = min(ambientOcclusion, ambientOcclusionHF);\n\n obscurance = min(obscurance, ambientOcclusion);\n\n // Diffuse from ambient\n diffuse = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceLowNormal).xyz;\n\n // Scattering ambient specular is the same as non scattering for now\n // TODO: we should use the same specular answer as for direct lighting\n }\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\n// the curvature texture\nuniform sampler2D curvatureMap;\n\nvec4 fetchCurvature(vec2 texcoord) {\n return texture(curvatureMap, texcoord);\n}\n\n// the curvature texture\nuniform sampler2D diffusedCurvatureMap;\n\nvec4 fetchDiffusedCurvature(vec2 texcoord) {\n return texture(diffusedCurvatureMap, texcoord);\n}\n\nvoid unpackMidLowNormalCurvature(vec2 texcoord, out vec4 midNormalCurvature, out vec4 lowNormalCurvature) {\n 
midNormalCurvature = fetchCurvature(texcoord);\n lowNormalCurvature = fetchDiffusedCurvature(texcoord);\n midNormalCurvature.xyz = normalize((midNormalCurvature.xyz - 0.5f) * 2.0f);\n lowNormalCurvature.xyz = normalize((lowNormalCurvature.xyz - 0.5f) * 2.0f);\n midNormalCurvature.w = (midNormalCurvature.w * 2.0 - 1.0);\n lowNormalCurvature.w = (lowNormalCurvature.w * 2.0 - 1.0);\n}\n\nvec3 evalAmbientSphereGlobalColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal,\nvec3 albedo, vec3 fresnel, float metallic, float roughness\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normal);\n vec3 fragPositionWS = vec3(invViewMat * vec4(position, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n color += ambientDiffuse;\n color += ambientSpecular;\n\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n color += directionalDiffuse;\n color += directionalSpecular;\n\n return color;\n}\n\n\n\n\nlayout(location = 0) in vec2 _texCoord0;\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n DeferredFrameTransform deferredTransform = getDeferredFrameTransform();\n DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0);\n\n float shadowAttenuation = 1.0;\n\n if (frag.mode == FRAG_MODE_UNLIT) {\n discard;\n } else if (frag.mode == FRAG_MODE_LIGHTMAPPED) {\n discard;\n } else {\n vec4 midNormalCurvature = vec4(0);\n vec4 lowNormalCurvature = vec4(0);\n if (frag.mode == FRAG_MODE_SCATTERING) {\n unpackMidLowNormalCurvature(_texCoord0, midNormalCurvature, lowNormalCurvature);\n }\n\n vec3 color = evalAmbientSphereGlobalColor(\n getViewInverse(),\n shadowAttenuation,\n frag.obscurance,\n frag.position.xyz,\n frag.normal,\n frag.albedo,\n frag.fresnel,\n frag.metallic,\n frag.roughness,\n frag.scattering,\n midNormalCurvature,\n lowNormalCurvature);\n _fragColor = vec4(color, 1.0);\n\n }\n}\n\n\n"
+ },
+ "+mpNVPHhcBoKZ74Tq3vNoA==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// simple_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/04/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\n// Generated on Wed May 23 14:24:07 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nout vec4 _fadeData1;\nout vec4 _fadeData2;\nout vec4 _fadeData3;\n\n\n// the interpolated normal\nout vec3 _normalWS;\nout vec3 _normalMS;\nout vec4 _color;\nout vec2 _texCoord0;\nout vec4 _positionMS;\nout vec4 _positionES;\nout vec4 _positionWS;\n\nvoid main(void) {\n _color = color_sRGBAToLinear(inColor);\n _texCoord0 = inTexCoord0.st;\n _positionMS = inPosition;\n _normalMS = inNormal.xyz;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = 
uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n _fadeData1 = inTexCoord2;\n _fadeData2 = inTexCoord3;\n _fadeData3 = inTexCoord4; \n\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// simple_transparent_textured_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? 
((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 
0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = 
dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n\n\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n\n\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return 
lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n 
vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. 
This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n\n\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float 
deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel 
= fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n\n\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = 
getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef 
GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n// the albedo texture\nuniform sampler2D originalTexture;\n\nin vec4 _positionES;\nin vec3 _normalWS;\nin vec4 _color;\nin vec2 _texCoord0;\nin vec4 _positionWS;\n\n// Declare after all samplers to prevent sampler location mix up with originalTexture\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= 
fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nin vec4 _fadeData1;\nin vec4 _fadeData2;\nin vec4 _fadeData3;\n\n\n\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = int(_fadeData1.w);\n fadeParams.threshold = _fadeData2.w;\n fadeParams.noiseOffset = _fadeData1.xyz;\n fadeParams.baseOffset = _fadeData2.xyz;\n fadeParams.baseInvSize = _fadeData3.xyz;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n vec4 texel = texture(originalTexture, _texCoord0.st);\n\n\n float opacity = _color.a;\n if (_color.a <= 0.0) {\n texel = color_sRGBAToLinear(texel);\n opacity = -_color.a;\n }\n opacity *= texel.a;\n vec3 albedo = _color.rgb * texel.rgb;\n\n vec3 fragPosition = _positionES.xyz;\n vec3 fragNormal = normalize(_normalWS);\n\n TransformCamera cam = getTransformCamera();\n\n _fragColor0 = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n 1.0,\n fragPosition,\n fragNormal,\n albedo,\n DEFAULT_FRESNEL,\n 0.0f,\n fadeEmissive,\n DEFAULT_ROUGHNESS,\n opacity),\n opacity);\n\n}\n\n"
+ },
+ "/9KkHjVgFwvEDKxSPMy6/Q==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_normal_map_fade_dq.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/29/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual 
quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * 
vec4(inNormal, 0));\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _tangentWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n skinPositionNormalTangent(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, inTangent.xyz, position, interpolatedNormal.xyz, interpolatedTangent.xyz);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n interpolatedNormal = vec4(normalize(interpolatedNormal.xyz), 0.0);\n interpolatedTangent = vec4(normalize(interpolatedTangent.xyz), 0.0);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= 
cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedNormal.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedTangent.xyz = vec3(dot(mr0, interpolatedTangent.xyz), dot(mr1, interpolatedTangent.xyz), dot(mr2, interpolatedTangent.xyz));\n }\n\n\n _normalWS = interpolatedNormal.xyz;\n _tangentWS = interpolatedTangent.xyz;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_translucent_normal_map.frag\n// fragment shader\n//\n// Created by Olivier Prat on 23/01/2018.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 
0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat 
lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 
_ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n\n\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return 
vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the 
accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is 
not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we 
require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 
ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\nuniform lightBuffer {\n Light lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float 
fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / 
planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return 
frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? -eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? 
frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? (element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n\n\n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = 
lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef 
GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // 
unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec4 _positionWS;\nin vec3 _normalWS;\nin vec3 _tangentWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n Material mat = getMaterial();\n int matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\n\n\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTex = (((matKey & NORMAL_MAP_BIT) != 0) ? fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? 
emissiveTex : emissive);\n}\n;\n\n vec3 fragPositionES = _positionES.xyz;\n vec3 fragPositionWS = _positionWS.xyz;\n // Lighting is done in world space\n vec3 fragNormalWS;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTex, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormalWS = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n TransformCamera cam = getTransformCamera();\n vec3 fragToEyeWS = cam._viewInverse[3].xyz - fragPositionWS;\n vec3 fragToEyeDirWS = normalize(fragToEyeWS);\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragToEyeDirWS);\n\n vec4 localLighting = vec4(0.0);\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(_positionWS);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (hasLocalLights(numLights, clusterPos, dims)) {\n localLighting = evalLocalLighting(cluster, numLights, fragPositionWS, surfaceWS,\n metallic, fresnel, albedo, 0.0,\n vec4(0), vec4(0), opacity);\n }\n\n _fragColor = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPositionES,\n\t\tfragPositionWS,\n albedo,\n fresnel,\n metallic,\n emissive,\n surfaceWS, opacity, localLighting.rgb),\n opacity);\n}\n\n\n"
+ },
+ "/QdKNQu/OZW/wZUMgFCFUw==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// model_lightmap.vert\n// vertex shader\n//\n// Created by Sam Gateau on 11/21/14.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec2 _texCoord0;\nlayout(location = 2) out vec2 _texCoord1;\nlayout(location = 3) out vec3 _normalWS;\nlayout(location = 4) out vec3 _color;\n\nvoid main(void) {\n // pass along the color in linear space\n _color = color_sRGBToLinear(inColor.xyz);\n\n // and the texture coordinates\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord1.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 
0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_lightmap.frag\n// fragment shader\n//\n// Created by Samuel Gateau on 11/19/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - 
FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float 
smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\n#endif\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nuniform sampler2D emissiveMap;\nvec3 fetchLightmapMap(vec2 uv) {\n vec2 emissiveParams = getTexMapArray()._lightmapParams.xy;\n return (vec3(emissiveParams.x) + emissiveParams.y * texture(emissiveMap, uv).rgb);\n}\n\n\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 2) in vec2 _texCoord1;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _color;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedo = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\n\n vec3 lightmapVal = fetchLightmapMap(_texCoord1);\n\n\n packDeferredFragmentLightmap(\n normalize(_normalWS), \n evalOpaqueFinalAlpha(getMaterialOpacity(mat), albedo.a),\n getMaterialAlbedo(mat) * albedo.rgb * _color,\n getMaterialRoughness(mat) * roughness,\n getMaterialMetallic(mat) * metallicTex,\n /*metallicTex, // no use of */getMaterialFresnel(mat),\n lightmapVal);\n}\n\n\n"
+ },
+ "/ap5E6Wo+pKIeUmfBkSlXQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_shadow.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer 
{\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n skinPosition(inSkinClusterIndex, inSkinClusterWeight, inPosition, position);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n 
_mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_shadow.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
+ "/nm10YJuYOiret6AZ4H4QA==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_shadow.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer 
{\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n skinPosition(inSkinClusterIndex, inSkinClusterWeight, inPosition, position);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n 
_mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_shadow.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
+ "04pa0ArsTf+wloCY22ms2A==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// skin_model_shadow_fade_dq.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer 
transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 
4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n}\n\n\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = 
length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n skinPosition(inSkinClusterIndex, inSkinClusterWeight, inPosition, position);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_shadow_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 0) in vec4 _positionWS;\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = 
fadeBaseInvSize;\n\n applyFadeClip(fadeParams, _positionWS.xyz);\n\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
+ "0F+ugd1jfwUnFqhMX+mMCQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawViewportQuatTransformTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] filling in \n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = 
texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, 0.0, 1.0),\n vec4(1.0, -1.0, 0.0, 1.0),\n vec4(-1.0, 1.0, 0.0, 1.0),\n vec4(1.0, 1.0, 0.0, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n // standard transform but applied to the Texcoord\n vec4 tc = vec4((pos.xy + 1.0) * 0.5, pos.zw);\n\n TransformObject obj = getTransformObject();\n { // transformModelToWorldPos\n tc = (obj._model * tc);\n }\n\n\n gl_Position = pos;\n varTexCoord0 = tc.xy;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// Haze.frag\n//\n// Created by Nissim Hadar on 9/5/2107.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 _prevView;\n mat4 _prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform 
getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat 
lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 
getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n\n\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return 
lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = 
fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n\n\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams 
hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n\n\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == 
HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nuniform sampler2D colorMap;\nuniform sampler2D linearDepthMap;\n\nvec4 unpackPositionFromZeye(vec2 texcoord) {\n float Zeye = -texture(linearDepthMap, texcoord).x;\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0);\n}\n\nin vec2 varTexCoord0;\nout vec4 outFragColor;\n\nvoid main(void) {\n if ((isHazeEnabled() == 0.0) || (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) != HAZE_MODE_IS_ACTIVE) {\n discard;\n }\n\n vec4 fragColor = texture(colorMap, varTexCoord0);\n vec4 fragPositionES = unpackPositionFromZeye(varTexCoord0);\n\n mat4 viewInverse = getViewInverse();\n vec4 fragPositionWS = viewInverse * fragPositionES;\n vec4 eyePositionWS = viewInverse[3];\n\n Light light = getKeyLight();\n vec3 lightDirectionWS = getLightDirection(light);\n\n outFragColor = computeHazeColor(fragColor, fragPositionES.xyz, fragPositionWS.xyz, eyePositionWS.xyz, lightDirectionWS);\n}\n\n\n"
+ },
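The cache entry above ends with the haze fragment shader; its specular lighting path is the Schlick Fresnel approximation combined with a GGX-style distribution whose denominator folds in the two Smith G1 visibility factors (see `fresnelSchlickColor` and `specularDistribution` in the generated source, whose comments note that the 1/π factor is deliberately folded into the light normalization). As a side note for readers of this dump, the sketch below restates that core math, plus the range-only haze falloff from `computeHazeColor`, as standalone GLSL. It is illustrative only: the function names are not part of the cached file or of any High Fidelity API, and it assumes the same inputs (`f0`, `ldoth`, `roughness4`, the precomputed Smith terms) that the dumped shader gathers in `SurfaceData`.

```glsl
// Minimal sketch of the Fresnel / specular / haze math that appears in the
// generated shader strings above. Names are illustrative, not repository API.

// Schlick approximation: F = F0 + (1 - F0) * (1 - l.h)^5
vec3 schlickFresnel(vec3 f0, float ldoth) {
    float base = 1.0 - ldoth;
    float base2 = base * base;
    return f0 + (vec3(1.0) - f0) * (base * base2 * base2);
}

// GGX-style distribution with both Smith G1 factors folded into the denominator;
// the 1/PI factor is intentionally left to the light normalization, as in the dump.
float ggxSpecular(float roughness4, float ndoth,
                  float smithInvG1NdotV, float smithInvG1NdotL) {
    float denom = ndoth * ndoth * (roughness4 - 1.0) + 1.0;
    denom *= denom;
    denom *= smithInvG1NdotV * smithInvG1NdotL;
    return roughness4 / denom;
}

// Range-only haze amount, i.e. the simplest branch of computeHazeColor above.
float hazeAmountRangeOnly(float distanceES, float hazeRangeFactor) {
    return 1.0 - exp(-distanceES * hazeRangeFactor);
}
```

The same pattern repeats in the next cache entry (keyed `0RqDfeTpMx45eqgktNgpbw==`), which pairs a model vertex shader with the overlay3D_model fragment shader but reuses identical Fresnel, specular-distribution, and haze blocks.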
+ "0RqDfeTpMx45eqgktNgpbw==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/14/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec2 _texCoord0;\nlayout(location = 2) out vec2 _texCoord1;\nlayout(location = 3) out vec3 _normalWS;\nlayout(location = 4) out vec3 _color;\nlayout(location = 5) out float _alpha;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n 
\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n// overlay3D_model.frag\n// fragment shader\n//\n// Created by Sam Gateau on 6/16/15.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, 
cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n 
C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n\n\n // These values will be set when we know the light direction, in 
updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n\n\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n return specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, 
float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalSkyboxGlobalColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal,\n vec3 albedo, vec3 fresnel, float metallic, float roughness\n) {\n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normal);\n vec3 fragPositionWS = vec3(invViewMat * vec4(position, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance\n);\n color += ambientDiffuse;\n color += ambientSpecular;\n\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation\n);\n color += directionalDiffuse;\n color += directionalSpecular;\n\n // Attenuate the light if haze effect selected\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_KEYLIGHT_ATTENUATED) == HAZE_MODE_IS_KEYLIGHT_ATTENUATED) {\n color = computeHazeColorKeyLightAttenuation(color, lightDirection, fragPositionWS); \n }\n\n return color;\n}\n\n\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material 
_mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return 
_camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec3 _normalWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\n\n\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? 
step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n {\n if (opacity < 1.0) {\n discard;\n }\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n\n vec3 fragPosition = _positionES.xyz;\n\n TransformCamera cam = getTransformCamera();\n\n vec4 color = vec4(evalSkyboxGlobalColor(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPosition,\n normalize(_normalWS),\n albedo,\n fresnel,\n metallic,\n roughness),\n opacity);\n\n // And emissive\n color.rgb += emissive * isEmissiveEnabled();\n\n // Apply standard tone mapping\n _fragColor = vec4(pow(color.xyz, vec3(1.0 / 2.2)), color.w);\n}\n\n"
+ },
+ "0hQ0Rb3K0WDqHW53Tf8Kuw==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 04/24/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec4 _positionWS;\nlayout(location = 2) out vec2 _texCoord0;\nlayout(location = 3) out vec2 _texCoord1;\nlayout(location = 4) out vec3 _normalWS;\nlayout(location = 5) out vec3 _color;\nlayout(location = 6) out float _alpha;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = 
vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// model_unlit_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= 
FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n 
float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly 
lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# 
define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n 
edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\n#endif\n\n\n\nlayout(location = 1) in vec4 _positionWS;\nlayout(location = 2) in vec2 _texCoord0;\nlayout(location = 4) in vec3 _normalWS;\nlayout(location = 5) in vec3 _color;\nlayout(location = 6) in float _alpha;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n {\n if (opacity < 1.0) {\n discard;\n }\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n albedo += fadeEmissive;\n packDeferredFragmentUnlit(\n normalize(_normalWS), \n opacity,\n albedo * isUnlitEnabled());\n}\n\n\n"
+ },
+ "0n2jal6anoYiLyvuCgYbqg==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/14/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec2 _texCoord0;\nlayout(location = 2) out vec2 _texCoord1;\nlayout(location = 3) out vec3 _normalWS;\nlayout(location = 4) out vec3 _color;\nlayout(location = 5) out float _alpha;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n 
\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_specular_map.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 5/6/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * 
FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float 
metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#define scatteringMap 6\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(tableTex(matTex, scatteringMap), uv).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return tableTexValue(matTex, scatteringMap, uv).r; // boolean scattering for now\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\nuniform sampler2D scatteringMap;\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(scatteringMap, uv, TAA_TEXTURE_LOD_BIAS).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return texture(scatteringMap, uv).r; // boolean scattering for now\n}\n#endif\n\n\n\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 2) in vec2 _texCoord1;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _color;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\nfloat scatteringTex = (((matKey & SCATTERING_MAP_BIT) != 0) ? fetchScatteringMap(_texCoord0) : 0.0);\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? 
step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n {\n if (opacity < 1.0) {\n discard;\n }\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n {\n scattering = (((matKey & SCATTERING_MAP_BIT) != 0) ? scatteringTex : scattering);\n}\n;\n\n packDeferredFragment(\n normalize(_normalWS), \n opacity,\n albedo,\n roughness,\n metallic,\n emissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "15zRKjBz9e2kFmwfqMmhNw==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/14/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec2 _texCoord0;\nlayout(location = 2) out vec2 _texCoord1;\nlayout(location = 3) out vec3 _normalWS;\nlayout(location = 4) out vec3 _color;\nlayout(location = 5) out float _alpha;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n 
\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// material_opaque_unlit.frag\n// fragment shader\n//\n// Created by Sam Gateau on 5/5/2016.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * 
FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float 
metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\n#endif\n\n\n\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _color;\nlayout(location = 5) in float _alpha;\n\nvoid main(void) {\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n {\n if (opacity < 1.0) {\n discard;\n }\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n packDeferredFragmentUnlit(\n normalize(_normalWS), \n opacity,\n albedo * isUnlitEnabled());\n}\n\n\n"
+ },
+ "17ummFeJSrtyo7Ny+ynRfg==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// simple.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 9/15/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\n// the interpolated normal\nout vec3 _normalWS;\nout vec3 _normalMS;\nout vec4 _color;\nout vec2 _texCoord0;\nout vec4 _positionMS;\nout vec4 _positionES;\n\nvoid main(void) {\n _color = color_sRGBAToLinear(inColor);\n _texCoord0 = inTexCoord0.st;\n _positionMS = inPosition;\n _normalMS = inNormal.xyz;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = 
obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// simple_textured.frag\n// fragment shader\n//\n// Created by Clement Brisset on 5/29/15.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, 
FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = 
surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// the albedo texture\nuniform sampler2D originalTexture;\n\n// the interpolated normal\nin vec3 _normalWS;\nin vec4 _color;\nin vec2 _texCoord0;\n\nvoid main(void) {\n vec4 texel = texture(originalTexture, _texCoord0);\n\n packDeferredFragment(\n normalize(_normalWS),\n 1.0,\n _color.rgb * texel.rgb,\n DEFAULT_ROUGHNESS,\n DEFAULT_METALLIC,\n DEFAULT_EMISSIVE,\n DEFAULT_OCCLUSION,\n DEFAULT_SCATTERING);\n}\n\n"
+ },
+ "1fGQdakrI/l6BLcnFcIp+Q==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_fade_dq.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual 
quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * 
vec4(inNormal, 0));\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec3 interpolatedNormal = vec3(0.0, 0.0, 0.0);\n\n skinPositionNormal(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, position, interpolatedNormal);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef 
GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_translucent.frag\n// fragment shader\n//\n// Created by Sam Gateau on 2/15/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n 
vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light 
sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n\n\n return 
lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 
1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return 
specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in 
world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\nuniform lightBuffer {\n Light lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = 
vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return 
frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? 
-eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? 
(element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n\n\n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float 
radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 
_textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec4 _positionWS;\nin vec3 _normalWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n\n\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? 
emissiveTex : emissive);\n}\n;\n\n vec3 fragPositionES = _positionES.xyz;\n vec3 fragPositionWS = _positionWS.xyz;\n // Lighting is done in world space\n vec3 fragNormalWS = normalize(_normalWS);\n\n TransformCamera cam = getTransformCamera();\n vec3 fragToEyeWS = cam._viewInverse[3].xyz - fragPositionWS;\n vec3 fragToEyeDirWS = normalize(fragToEyeWS);\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragToEyeDirWS);\n\n vec4 localLighting = vec4(0.0);\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(_positionWS);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (hasLocalLights(numLights, clusterPos, dims)) {\n localLighting = evalLocalLighting(cluster, numLights, fragPositionWS, surfaceWS,\n metallic, fresnel, albedo, 0.0,\n vec4(0), vec4(0), opacity);\n }\n\n _fragColor = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPositionES,\n\t\tfragPositionWS,\n albedo,\n fresnel,\n metallic,\n emissive,\n surfaceWS, opacity, localLighting.rgb),\n opacity);\n}\n\n\n"
+ },
+ "1rsaUxw8y7WC59ipAoYd4w==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_normal_map_dq.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/29/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual 
quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * 
vec4(inNormal, 0));\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _tangentWS;\nout vec3 _color;\nout float _alpha;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n skinPositionNormalTangent(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, inTangent.xyz, position, interpolatedNormal.xyz, interpolatedTangent.xyz);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n interpolatedNormal = vec4(normalize(interpolatedNormal.xyz), 0.0);\n interpolatedTangent = vec4(normalize(interpolatedTangent.xyz), 0.0);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= 
cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedNormal.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedTangent.xyz = vec3(dot(mr0, interpolatedTangent.xyz), dot(mr1, interpolatedTangent.xyz), dot(mr2, interpolatedTangent.xyz));\n }\n\n\n _normalWS = interpolatedNormal.xyz;\n _tangentWS = interpolatedTangent.xyz;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_normal_map.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 5/6/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? 
((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 
0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = 
dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#define scatteringMap 6\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(tableTex(matTex, scatteringMap), uv).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return tableTexValue(matTex, scatteringMap, uv).r; // boolean scattering for now\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\nuniform sampler2D scatteringMap;\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(scatteringMap, uv, TAA_TEXTURE_LOD_BIAS).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return texture(scatteringMap, uv).r; // boolean scattering for now\n}\n#endif\n\n\n\nlayout(location = 0) in vec4 _positionES;\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 2) in vec2 _texCoord1;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _tangentWS;\nlayout(location = 5) in vec3 _color;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTex = (((matKey & NORMAL_MAP_BIT) != 0) ? 
fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\nfloat scatteringTex = (((matKey & SCATTERING_MAP_BIT) != 0) ? fetchScatteringMap(_texCoord0) : 0.0);\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragNormalWS;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTex, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormalWS = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n {\n scattering = (((matKey & SCATTERING_MAP_BIT) != 0) ? scatteringTex : scattering);\n}\n;\n\n packDeferredFragment(\n normalize(fragNormalWS.xyz),\n opacity,\n albedo,\n roughness,\n metallic,\n emissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
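The cached fragment shader that ends above packs its G-buffer shading mode and metallic value into a single channel via the FRAG_PACK_* constants (shaded 0.0–0.1, lightmapped 0.2–0.3, scattering 0.4–0.5, unlit at 0.6). As an illustration only, here is a minimal standalone C++ sketch of that pack/unpack arithmetic, copied from the shader's constants; the host-side types, the `lerp` helper, and the `main` driver are assumptions of this sketch, not part of the generated cache entry.

```cpp
// Illustrative C++ restatement of packShadedMetallic()/unpackModeMetallic() from the
// cached shader above. The lightmapped and scattering variants are analogous, using
// their own 0.1-wide sub-ranges.
#include <algorithm>
#include <cstdio>

constexpr float SHADED_NON_METALLIC      = 0.0f;
constexpr float SHADED_METALLIC          = 0.1f;
constexpr float LIGHTMAPPED_NON_METALLIC = 0.2f;
constexpr float LIGHTMAPPED_METALLIC     = 0.3f;
constexpr float SCATTERING_NON_METALLIC  = 0.4f;
constexpr float SCATTERING_METALLIC      = 0.5f;
constexpr float UNLIT                    = 0.6f;

enum Mode { MODE_UNLIT = 0, MODE_SHADED = 1, MODE_LIGHTMAPPED = 2, MODE_SCATTERING = 3 };

static float lerp(float a, float b, float t) { return a + (b - a) * t; }

// Pack a [0,1] metallic value into the sub-range reserved for the "shaded" mode.
static float packShadedMetallic(float metallic) {
    return lerp(SHADED_NON_METALLIC, SHADED_METALLIC, metallic);
}

// Recover (mode, metallic) from the packed value, mirroring unpackModeMetallic().
// Note: the shader's last branch tests rawValue >= FRAG_PACK_UNLIT, so values strictly
// between 0.5 and 0.6 are never produced; this sketch folds them into a plain else.
static void unpackModeMetallic(float raw, Mode& mode, float& metallic) {
    if (raw <= SHADED_METALLIC) {
        mode = MODE_SHADED;
        metallic = std::clamp((raw - SHADED_NON_METALLIC) / (SHADED_METALLIC - SHADED_NON_METALLIC), 0.0f, 1.0f);
    } else if (raw <= LIGHTMAPPED_METALLIC) {
        mode = MODE_LIGHTMAPPED;
        metallic = std::clamp((raw - LIGHTMAPPED_NON_METALLIC) / (LIGHTMAPPED_METALLIC - LIGHTMAPPED_NON_METALLIC), 0.0f, 1.0f);
    } else if (raw <= SCATTERING_METALLIC) {
        mode = MODE_SCATTERING;
        metallic = std::clamp((raw - SCATTERING_NON_METALLIC) / (SCATTERING_METALLIC - SCATTERING_NON_METALLIC), 0.0f, 1.0f);
    } else {
        mode = MODE_UNLIT;
        metallic = 0.0f;
    }
}

int main() {
    Mode mode; float metallic;
    unpackModeMetallic(packShadedMetallic(0.75f), mode, metallic);
    std::printf("mode=%d metallic=%.2f\n", mode, metallic); // mode=1 metallic=0.75
    return 0;
}
```

Only the position within each 0.1-wide band carries the metallic value; the band itself selects the shading mode, which is why a single G-buffer channel suffices.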
+ "27ckrjp2miPxgJclleP5Jg==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_shadow_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer 
transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n skinPosition(inSkinClusterIndex, inSkinClusterWeight, inPosition, position);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // 
_transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// skin_model_shadow_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/08/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nin vec4 _positionWS;\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n 
applyFadeClip(fadeParams, _positionWS.xyz);\n\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
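The skin_model_shadow_fade entry above transforms its vertices through skinPosition(), a four-influence linear-blend skinning loop over clusterMatrices. The sketch below restates that loop on the CPU purely for clarity; glm is assumed here only for the vec4/mat4 types (the shader's own C++-compatible interface already aliases VEC4 to glm::vec4), and the small `main` with identity matrices is illustrative, not engine code.

```cpp
// Host-side sketch of the shader's linear-blend skinning: each vertex position is the
// weighted sum of up to four cluster transforms applied to the rest-pose position.
#include <glm/glm.hpp>

constexpr int INDICES_PER_VERTEX = 4; // matches the shader constant

glm::vec4 skinPosition(const glm::mat4* clusterMatrices,   // bound cluster palette
                       const glm::ivec4& skinClusterIndex, // per-vertex cluster indices
                       const glm::vec4& skinClusterWeight, // per-vertex blend weights
                       const glm::vec4& inPosition) {
    glm::vec4 newPosition(0.0f);
    for (int i = 0; i < INDICES_PER_VERTEX; i++) {
        const glm::mat4& clusterMatrix = clusterMatrices[skinClusterIndex[i]];
        float clusterWeight = skinClusterWeight[i];
        newPosition += clusterMatrix * inPosition * clusterWeight;
    }
    return newPosition;
}

int main() {
    glm::mat4 palette[2] = { glm::mat4(1.0f), glm::mat4(1.0f) };
    glm::vec4 p = skinPosition(palette, glm::ivec4(0, 1, 0, 0),
                               glm::vec4(0.5f, 0.5f, 0.0f, 0.0f), glm::vec4(1, 2, 3, 1));
    return (p.w > 0.0f) ? 0 : 1; // with identity matrices the position is unchanged
}
```

The normal and tangent variants in the same entry apply the identical loop to direction vectors with w = 0 so that translation in the cluster matrices does not affect them.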
+ "2GBtYhe6qps7934dzbucbA==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawViewportQuatTransformTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] filling in \n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = 
texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, 0.0, 1.0),\n vec4(1.0, -1.0, 0.0, 1.0),\n vec4(-1.0, 1.0, 0.0, 1.0),\n vec4(1.0, 1.0, 0.0, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n // standard transform but applied to the Texcoord\n vec4 tc = vec4((pos.xy + 1.0) * 0.5, pos.zw);\n\n TransformObject obj = getTransformObject();\n { // transformModelToWorldPos\n tc = (obj._model * tc);\n }\n\n\n gl_Position = pos;\n varTexCoord0 = tc.xy;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// surfaceGeometry_downsampleDepthNormal.frag\n//\n// Created by Sam Gateau on 6/3/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\nuniform sampler2D linearDepthMap;\nuniform sampler2D normalMap;\n\nin vec2 varTexCoord0;\n\nlayout(location = 0) out vec4 outLinearDepth;\nlayout(location = 1) out vec4 outNormal;\n\nvoid main(void) {\n // Gather 2 by 2 quads from texture and downsample\n\n // Try different filters for Z\n vec4 Zeyes = textureGather(linearDepthMap, varTexCoord0, 0);\n // float Zeye = texture(linearDepthMap, varTexCoord0).x;\n\n vec4 rawNormalsX = textureGather(normalMap, varTexCoord0, 0);\n vec4 rawNormalsY = textureGather(normalMap, varTexCoord0, 1);\n vec4 rawNormalsZ = textureGather(normalMap, varTexCoord0, 2);\n\n float Zeye = min(min(Zeyes.x, Zeyes.y), min(Zeyes.z, Zeyes.w));\n\n vec3 normal = vec3(0.0);\n normal += unpackNormal(vec3(rawNormalsX[0], rawNormalsY[0], rawNormalsZ[0]));\n normal += unpackNormal(vec3(rawNormalsX[1], rawNormalsY[1], rawNormalsZ[1]));\n normal += 
unpackNormal(vec3(rawNormalsX[2], rawNormalsY[2], rawNormalsZ[2]));\n normal += unpackNormal(vec3(rawNormalsX[3], rawNormalsY[3], rawNormalsZ[3]));\n\n normal = normalize(normal);\n\n outLinearDepth = vec4(Zeye, 0.0, 0.0, 0.0);\n outNormal = vec4((normal + vec3(1.0)) * 0.5, 0.0);\n}\n\n\n\n"
+ },
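The depth/normal downsampling shader above, like several other cached entries, leans on the octahedral normal codec exposed through packNormal()/unpackNormal(). The following standalone C++ restatement of float32x3_to_oct/oct_to_float32x3 shows the fold-and-unfold round trip; the plain Vec2/Vec3 structs and the example normal in `main` are assumptions of this sketch, while the arithmetic is copied from the shader source.

```cpp
// Illustrative C++ version of the octahedral encode/decode used by the cached shaders.
#include <cmath>
#include <cstdio>

struct Vec2 { float x, y; };
struct Vec3 { float x, y, z; };

static float signNotZero(float v) { return v >= 0.0f ? 1.0f : -1.0f; }

// Project the unit normal onto the octahedron and fold the lower hemisphere over.
static Vec2 float32x3_to_oct(const Vec3& v) {
    float invL1 = 1.0f / (std::fabs(v.x) + std::fabs(v.y) + std::fabs(v.z));
    Vec2 p = { v.x * invL1, v.y * invL1 };
    if (v.z <= 0.0f) {
        p = { (1.0f - std::fabs(p.y)) * signNotZero(p.x),
              (1.0f - std::fabs(p.x)) * signNotZero(p.y) };
    }
    return p;
}

// Inverse mapping: unfold the lower hemisphere and renormalize.
static Vec3 oct_to_float32x3(const Vec2& e) {
    Vec3 v = { e.x, e.y, 1.0f - std::fabs(e.x) - std::fabs(e.y) };
    if (v.z < 0.0f) {
        float ox = (1.0f - std::fabs(v.y)) * signNotZero(v.x);
        float oy = (1.0f - std::fabs(v.x)) * signNotZero(v.y);
        v.x = ox; v.y = oy;
    }
    float len = std::sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
    return { v.x / len, v.y / len, v.z / len };
}

int main() {
    Vec3 n = { 0.267f, -0.535f, -0.802f };           // roughly unit length, lower hemisphere
    Vec3 r = oct_to_float32x3(float32x3_to_oct(n));  // round trip
    std::printf("%.3f %.3f %.3f\n", r.x, r.y, r.z);  // close to the input normal
    return 0;
}
```

In the downsampling pass itself, the shader decodes the four gathered normals with this codec, renormalizes their sum, and pairs the result with the minimum of the four gathered linear depths to produce the half-resolution outputs.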
+ "2TRJ08daYh/TDKtgiJHRnQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// deferred_light.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/16/16.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec2 _texCoord0;\n\nuniform vec4 texcoordFrameTransform;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n _texCoord0 = (pos.xy + 1.0) * 0.5;\n\n _texCoord0 *= texcoordFrameTransform.zw;\n _texCoord0 += texcoordFrameTransform.xy;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// local_lights_drawOutline.frag\n// fragment shader\n//\n// Created by Sam Gateau on 9/6/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about deferred buffer\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - 
FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\n// the albedo texture\nuniform sampler2D albedoMap;\n\n// the normal texture\nuniform sampler2D normalMap;\n\n// the specular texture\nuniform sampler2D specularMap;\n\n// the depth texture\nuniform sampler2D depthMap;\nuniform sampler2D linearZeyeMap;\n\n// the obscurance texture\nuniform sampler2D obscuranceMap;\n\n// the lighting texture\nuniform sampler2D lightingMap;\n\n\nstruct DeferredFragment {\n vec4 position;\n vec3 normal;\n float metallic;\n vec3 albedo;\n float obscurance;\n vec3 fresnel;\n float roughness;\n int mode;\n float scattering;\n float depthVal;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nDeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n vec4 specularVal;\n \n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n specularVal = texture(specularMap, texcoord);\n frag.obscurance = texture(obscuranceMap, texcoord).x;\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n frag.obscurance = min(specularVal.w, frag.obscurance);\n\n if (frag.mode == FRAG_MODE_SCATTERING) {\n frag.scattering = specularVal.x;\n }\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nDeferredFragment unpackDeferredFragmentNoPositionNoAmbient(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n\n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n //frag.emissive = specularVal.xyz;\n frag.obscurance = 1.0;\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 
_prevView;\n mat4 _prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? 
ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\n\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\nvec4 unpackDeferredPosition(float depthValue, vec2 texcoord) {\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n\n return vec4(evalEyePositionFromZdb(side, depthValue, texcoord), 1.0);\n}\n\n// This method to unpack position is fastesst\nvec4 unpackDeferredPositionFromZdb(vec2 texcoord) {\n float Zdb = texture(depthMap, texcoord).x;\n\treturn unpackDeferredPosition(Zdb, texcoord);\n}\n\nvec4 unpackDeferredPositionFromZeye(vec2 texcoord) {\n float Zeye = -texture(linearZeyeMap, texcoord).x;\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0);\n}\n\nDeferredFragment unpackDeferredFragment(DeferredFrameTransform deferredTransform, vec2 texcoord) {\n\n float depthValue = texture(depthMap, texcoord).r;\n\n DeferredFragment frag = unpackDeferredFragmentNoPosition(texcoord);\n\n frag.depthVal = depthValue;\n frag.position = unpackDeferredPosition(frag.depthVal, texcoord);\n\n return frag;\n}\n\n\n\n// the curvature texture\nuniform sampler2D curvatureMap;\n\nvec4 fetchCurvature(vec2 texcoord) {\n return texture(curvatureMap, texcoord);\n}\n\n// the curvature texture\nuniform sampler2D diffusedCurvatureMap;\n\nvec4 fetchDiffusedCurvature(vec2 texcoord) {\n return texture(diffusedCurvatureMap, texcoord);\n}\n\nvoid unpackMidLowNormalCurvature(vec2 texcoord, out vec4 midNormalCurvature, out vec4 lowNormalCurvature) {\n midNormalCurvature = fetchCurvature(texcoord);\n lowNormalCurvature = fetchDiffusedCurvature(texcoord);\n midNormalCurvature.xyz = normalize((midNormalCurvature.xyz - 0.5f) * 2.0f);\n lowNormalCurvature.xyz = normalize((lowNormalCurvature.xyz - 0.5f) * 2.0f);\n midNormalCurvature.w = (midNormalCurvature.w * 2.0 - 1.0);\n lowNormalCurvature.w = (lowNormalCurvature.w * 2.0 - 1.0);\n}\n\n\n// 
Everything about light\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; 
}\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nuniform lightBuffer {\n Light lightArray[128];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n\n\n vec4 
_Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * 
base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n\n\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nbool evalLightPointEdge(out vec3 color, Light light, vec4 fragLightDirLen, vec3 fragEyeDir) {\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n // Show edges\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n color = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n\n return (edge < 1.0);\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nbool evalLightSpotEdge(out vec3 color, Light light, vec4 fragLightDirLen, float cosSpotAngle, vec3 fragEyeDir) {\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n \n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), 
-lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n color = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n\n return (edge < 1.0);\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return 
volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? -eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n\n\n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? 
frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? (element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nin vec2 _texCoord0;\nout vec4 _fragColor;\n\nvoid main(void) {\n\n // Grab the fragment data from the uv\n vec2 texCoord = _texCoord0.st;\n\n vec4 fragPosition = unpackDeferredPositionFromZeye(texCoord);\n DeferredFragment frag = unpackDeferredFragmentNoPosition(texCoord);\n\n if (frag.mode == FRAG_MODE_UNLIT) {\n discard;\n }\n\n frag.position = fragPosition;\n\n\n // Frag pos in world\n mat4 invViewMat = getViewInverse();\n vec4 fragPos = invViewMat * fragPosition;\n \n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(fragPos);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (!hasLocalLights(numLights, clusterPos, dims)) {\n discard;\n }\n\n // Frag to eye vec\n vec4 fragEyeVector = invViewMat * vec4(-frag.position.xyz, 0.0);\n vec3 fragEyeDir = normalize(fragEyeVector.xyz);\n _fragColor = vec4(0, 0, 0, 1);\n \n int numLightTouching = 0;\n int lightClusterOffset = cluster.z;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in 
the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(frag.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n vec3 color = vec3(0.0);\n if (evalLightPointEdge(color, light, fragLightDirLen, fragEyeDir)) {\n _fragColor.rgb += color;\n }\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(frag.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 color = vec3(0.0);\n\n if (evalLightSpotEdge(color, light, fragLightDirLen, cosSpotAngle, fragEyeDir)) {\n _fragColor.rgb += color;\n }\n }\n\n}\n\n\n\n"
+ },
+ "2cHHDMdotyEYUK/0bGw5Yg==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// simple_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/04/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\n// Generated on Wed May 23 14:24:07 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nout vec4 _fadeData1;\nout vec4 _fadeData2;\nout vec4 _fadeData3;\n\n\n// the interpolated normal\nout vec3 _normalWS;\nout vec3 _normalMS;\nout vec4 _color;\nout vec2 _texCoord0;\nout vec4 _positionMS;\nout vec4 _positionES;\nout vec4 _positionWS;\n\nvoid main(void) {\n _color = color_sRGBAToLinear(inColor);\n _texCoord0 = inTexCoord0.st;\n _positionMS = inPosition;\n _normalMS = inNormal.xyz;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = 
uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n _fadeData1 = inTexCoord2;\n _fadeData2 = inTexCoord3;\n _fadeData3 = inTexCoord4; \n\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// simple_textured_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? 
((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 
0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = 
dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n\n\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n\n\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n// the albedo texture\nuniform sampler2D originalTexture;\n\n// the interpolated normal\nin vec3 _normalWS;\nin vec4 _color;\nin vec2 _texCoord0;\nin vec4 _positionWS;\n\n// Declare after all samplers to prevent sampler location mix up with originalTexture\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nin vec4 _fadeData1;\nin vec4 _fadeData2;\nin vec4 _fadeData3;\n\n\n\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = int(_fadeData1.w);\n fadeParams.threshold = _fadeData2.w;\n fadeParams.noiseOffset = _fadeData1.xyz;\n fadeParams.baseOffset = _fadeData2.xyz;\n fadeParams.baseInvSize = _fadeData3.xyz;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n vec4 texel = texture(originalTexture, _texCoord0);\n float colorAlpha = _color.a;\n if 
(_color.a <= 0.0) {\n texel = color_sRGBAToLinear(texel);\n colorAlpha = -_color.a;\n }\n\n const float ALPHA_THRESHOLD = 0.999;\n if (colorAlpha * texel.a < ALPHA_THRESHOLD) {\n packDeferredFragmentTranslucent(\n normalize(_normalWS),\n colorAlpha * texel.a,\n _color.rgb * texel.rgb + fadeEmissive,\n DEFAULT_FRESNEL,\n DEFAULT_ROUGHNESS);\n } else {\n packDeferredFragment(\n normalize(_normalWS),\n 1.0,\n _color.rgb * texel.rgb,\n DEFAULT_ROUGHNESS,\n DEFAULT_METALLIC,\n DEFAULT_EMISSIVE + fadeEmissive,\n DEFAULT_OCCLUSION,\n DEFAULT_SCATTERING);\n }\n}\n\n"
+ },
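Note: the generated fragment shader in the entry above evaluates Schlick's Fresnel approximation, F ≈ F0 + (1 − F0)·(1 − l·h)^5, expanding the fifth power as `base * base2 * base2` instead of calling `pow()` (the `pow(base, 5.0)` form is left commented out in the source). A minimal GLSL restatement of that identity; the function name here is chosen purely for illustration and is not part of the cached source:

```glsl
// Schlick's approximation of the Fresnel term:
//   F(l, h) = F0 + (1.0 - F0) * (1.0 - dot(l, h))^5
// The cached shaders expand the fifth power as base * base2 * base2,
// which is algebraically identical to pow(base, 5.0).
vec3 fresnelSchlickSketch(vec3 F0, float ldoth) {
    float base = 1.0 - clamp(ldoth, 0.0, 1.0);
    float base2 = base * base;
    float exponential = base * base2 * base2; // == pow(base, 5.0)
    return F0 + (vec3(1.0) - F0) * exponential;
}
```

Both forms produce the same value; the expansion simply trades a transcendental `pow()` call for three multiplies.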
+ "3EPmuQdbeP4bvyeB9Ml2VQ==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// overlay3D.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/16/15.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nout vec3 _color;\nout float _alpha;\nout vec2 _texCoord0;\nout vec4 _positionES;\nout vec3 _normalWS;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n _texCoord0 = inTexCoord0.st;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, 
inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// overlay3D_translucent.frag\n// fragment shader\n//\n// Created by Sam Gateau on 6/16/15.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; 
}\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat 
getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n\n\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = 
dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n\n\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= 
isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\nvec4 evalGlobalColor(float shadowAttenuation, vec3 position, vec3 normal, vec3 albedo, float metallic, vec3 fresnel, float roughness, float opacity) {\n\n // Need the light now\n Light light = getKeyLight();\n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n LightAmbient ambient = getLightAmbient();\n\n TransformCamera cam = getTransformCamera();\n vec3 fragEyeVectorView = normalize(-position);\n vec3 fragEyeDir;\n { // transformEyeToWorldDir\n fragEyeDir = vec3(cam._viewInverse * vec4(fragEyeVectorView.xyz, 0.0));\n }\n\n\n SurfaceData surface = initSurfaceData(roughness, normal, fragEyeDir);\n\n vec3 color = opacity * albedo * getLightColor(light) * getLightAmbientIntensity(ambient);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, 
fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += directionalSpecular / opacity;\n\n return vec4(color, opacity);\n}\n\nuniform sampler2D originalTexture;\n\nin vec2 _texCoord0;\nin vec4 _positionES;\nin vec3 _normalWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec4 albedo = texture(originalTexture, _texCoord0);\n\n vec3 fragPosition = _positionES.xyz;\n vec3 fragNormal = normalize(_normalWS);\n vec3 fragAlbedo = albedo.rgb * _color;\n float fragMetallic = 0.0;\n vec3 fragSpecular = vec3(0.1);\n float fragRoughness = 0.9;\n float fragOpacity = albedo.a * _alpha;\n\n vec4 color = evalGlobalColor(1.0,\n fragPosition,\n fragNormal,\n fragAlbedo,\n fragMetallic,\n fragSpecular,\n fragRoughness,\n fragOpacity);\n\n // Apply standard tone mapping\n _fragColor = vec4(pow(color.xyz, vec3(1.0 / 2.2)), color.w);\n}\n\n\n"
+ },
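Note: the light-buffer code in the entry above (`lightIrradiance_evalLightAttenuation`) attenuates point and spot lights with an inverse-square term on `d / falloffRadius + 1.0`, then fades the result linearly to zero over the last unit of distance before `cutoffRadius`. A short GLSL sketch of the same curve; the helper name below is illustrative only and does not appear in the cache:

```glsl
// Distance attenuation as used by lightIrradiance_evalLightAttenuation:
//   1.0 / (d / falloffRadius + 1.0)^2, multiplied by a linear fade that
//   reaches zero at cutoffRadius (clamp(cutoff - d, 0, 1)).
float punctualAttenuationSketch(float d, float falloffRadius, float cutoffRadius) {
    float denom = d / falloffRadius + 1.0;
    float attenuation = 1.0 / (denom * denom);
    // Equivalent to min(1.0, max(0.0, -(d - cutoffRadius))) in the cached source.
    return attenuation * clamp(cutoffRadius - d, 0.0, 1.0);
}
```

The `+ 1.0` in the denominator keeps the attenuation finite (equal to 1) at d = 0, and the cutoff clamp guarantees the light's contribution is exactly zero at and beyond its bounding radius.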
+ "3kmbrrZgeHWosgUewHePlw==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// overlay3D.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/16/15.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nout vec3 _color;\nout float _alpha;\nout vec2 _texCoord0;\nout vec4 _positionES;\nout vec3 _normalWS;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n _texCoord0 = inTexCoord0.st;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, 
inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// overlay3D_translucent.frag\n// fragment shader\n//\n// Created by Sam Gateau on 6/16/15.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; 
}\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat 
getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n\n\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = 
dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n\n\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= 
isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\nvec4 evalGlobalColor(float shadowAttenuation, vec3 position, vec3 normal, vec3 albedo, float metallic, vec3 fresnel, float roughness, float opacity) {\n\n // Need the light now\n Light light = getKeyLight();\n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n LightAmbient ambient = getLightAmbient();\n\n TransformCamera cam = getTransformCamera();\n vec3 fragEyeVectorView = normalize(-position);\n vec3 fragEyeDir;\n { // transformEyeToWorldDir\n fragEyeDir = vec3(cam._viewInverse * vec4(fragEyeVectorView.xyz, 0.0));\n }\n\n\n SurfaceData surface = initSurfaceData(roughness, normal, fragEyeDir);\n\n vec3 color = opacity * albedo * getLightColor(light) * getLightAmbientIntensity(ambient);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, 
fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += directionalSpecular / opacity;\n\n return vec4(color, opacity);\n}\n\nuniform sampler2D originalTexture;\n\nin vec2 _texCoord0;\nin vec4 _positionES;\nin vec3 _normalWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec4 albedo = texture(originalTexture, _texCoord0);\n\n vec3 fragPosition = _positionES.xyz;\n vec3 fragNormal = normalize(_normalWS);\n vec3 fragAlbedo = albedo.rgb * _color;\n float fragMetallic = 0.0;\n vec3 fragSpecular = vec3(0.1);\n float fragRoughness = 0.9;\n float fragOpacity = albedo.a * _alpha;\n\n vec4 color = evalGlobalColor(1.0,\n fragPosition,\n fragNormal,\n fragAlbedo,\n fragMetallic,\n fragSpecular,\n fragRoughness,\n fragOpacity);\n\n // Apply standard tone mapping\n _fragColor = vec4(pow(color.xyz, vec3(1.0 / 2.2)), color.w);\n}\n\n\n"
+ },
+ "4/cBJggWTLzlkWDb3Cvryw==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// standardTransformPNTC.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/10/2015.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nout vec3 varPosition;\nout vec3 varNormal;\nout vec2 varTexCoord0;\nout vec4 varColor;\n\nvoid main(void) {\n varTexCoord0 = inTexCoord0.st;\n varColor = color_sRGBAToLinear(inColor);\n \n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n varNormal = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n 
varPosition = inPosition.xyz;\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n// standardDrawTexture.frag\n// fragment shader\n//\n// Created by Sam Gateau on 6/10/15.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// the texture\nuniform sampler2D colorMap;\n\nin vec3 varPosition;\nin vec3 varNormal;\nin vec2 varTexCoord0;\nin vec4 varColor;\n\nout vec4 outFragColor;\n\nvoid main(void) {\n vec4 color = texture(colorMap, varTexCoord0);\n outFragColor = color * varColor;\n}\n\n\n"
+ },
+ "43rQzjoR7m7l96PqypljMw==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_fade_dq.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual 
quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * 
vec4(inNormal, 0));\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec3 interpolatedNormal = vec3(0.0, 0.0, 0.0);\n\n skinPositionNormal(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, position, interpolatedNormal);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef 
GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = 
FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float 
ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 
1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 1) in vec4 _positionWS;\nlayout(location = 2) in vec2 _texCoord0;\nlayout(location = 3) in vec2 _texCoord1;\nlayout(location = 4) in vec3 _normalWS;\nlayout(location = 5) in vec3 _color;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = 
fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n {\n if (opacity < 1.0) {\n discard;\n }\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n\n\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n\n packDeferredFragment(\n normalize(_normalWS), \n opacity,\n albedo,\n roughness,\n metallic,\n emissive+fadeEmissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "47eZoUUS5qAZqfH+ipeA0w==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid 
skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec3 interpolatedNormal = vec3(0.0, 0.0, 0.0);\n\n skinPositionNormal(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, position, interpolatedNormal);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n\n\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n// model_translucent_fade.frag\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE\n\n// The material values (at least the material key) must be precisely 
bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool 
lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// 
libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n\n\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous 
metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return 
specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in 
world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\nuniform lightBuffer {\n Light lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = 
vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return 
frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? 
-eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? 
(element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n\n\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float 
radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 
_textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\n\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec4 _positionWS;\nin vec3 _normalWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = 
fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragPositionES = _positionES.xyz;\n vec3 fragPositionWS = _positionWS.xyz;\n // Lighting is done in world space\n vec3 fragNormalWS = normalize(_normalWS);\n\n TransformCamera cam = getTransformCamera();\n vec3 fragToEyeWS = cam._viewInverse[3].xyz - fragPositionWS;\n vec3 fragToEyeDirWS = normalize(fragToEyeWS);\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragToEyeDirWS);\n\n vec4 localLighting = vec4(0.0);\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(_positionWS);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (hasLocalLights(numLights, clusterPos, dims)) {\n localLighting = evalLocalLighting(cluster, numLights, fragPositionWS, surfaceWS,\n metallic, fresnel, albedo, 0.0,\n vec4(0), vec4(0), opacity);\n }\n\n _fragColor = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPositionES,\n fragPositionWS,\n albedo,\n fresnel,\n metallic,\n emissive + fadeEmissive,\n surfaceWS, opacity, localLighting.rgb),\n opacity);\n}\n\n\n"
+ },
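For reference, the packed cluster descriptor and light-id list decoded by `clusterGrid_getCluster()` and `clusterGrid_getClusterLightId()` in the generated fragment shader above can be exercised on the CPU. The sketch below only illustrates the bit layout implied by those shader functions; the `packClusterDesc` helper and the flat `std::vector<uint32_t>` standing in for the ivec4-based UBO arrays are assumptions for the example, not engine code.

```cpp
// Minimal C++ round trip of the cluster-descriptor bit layout used by the
// clustered lighting shader above (hypothetical pack side + shader's decode side).
#include <cstdint>
#include <cassert>
#include <vector>

// Descriptor: bits 0-15 = content offset, 16-23 = point-light count, 24-31 = spot-light count.
uint32_t packClusterDesc(uint32_t contentOffset, uint32_t numPoint, uint32_t numSpot) {
    return (contentOffset & 0xFFFFu) | ((numPoint & 0xFFu) << 16) | ((numSpot & 0xFFu) << 24);
}

struct ClusterDesc { uint32_t numPoint, numSpot, contentOffset; };

// Same shifts/masks as clusterGrid_getCluster() in the GLSL.
ClusterDesc unpackClusterDesc(uint32_t desc) {
    return { (desc >> 16) & 0xFFu, (desc >> 24) & 0xFFu, desc & 0xFFFFu };
}

// Light ids are stored two per 32-bit word, 16 bits each (low half first),
// matching clusterGrid_getClusterLightId(); 'content' is a flattened stand-in
// for the ivec4 clusterContentBuffer.
uint16_t getClusterLightId(const std::vector<uint32_t>& content, uint32_t index, uint32_t offset) {
    uint32_t elementIndex = offset + index;
    uint32_t element = content[elementIndex >> 1];
    return static_cast<uint16_t>(((elementIndex & 1u) ? (element >> 16) : element) & 0xFFFFu);
}

int main() {
    ClusterDesc d = unpackClusterDesc(packClusterDesc(42, 3, 2));
    assert(d.contentOffset == 42 && d.numPoint == 3 && d.numSpot == 2);

    std::vector<uint32_t> content = { (7u << 16) | 5u, 9u };  // ids 5, 7, then 9
    assert(getClusterLightId(content, 0, 0) == 5);
    assert(getClusterLightId(content, 1, 0) == 7);
    assert(getClusterLightId(content, 2, 0) == 9);
    return 0;
}
```

Each descriptor keeps the offset into the content buffer in its low 16 bits and the point/spot light counts in the next two bytes, which is why the shader loops first over `cluster.x` point lights and then over the remaining spot lights starting at `cluster.x`.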
+ "4i7CpQHClwhzhQ13dtFmww==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_normal_map_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid 
skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _tangentWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n skinPositionNormalTangent(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, inTangent.xyz, position, interpolatedNormal.xyz, interpolatedTangent.xyz);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n interpolatedNormal = vec4(normalize(interpolatedNormal.xyz), 0.0);\n interpolatedTangent = vec4(normalize(interpolatedTangent.xyz), 0.0);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n\n\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedNormal.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 
mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedTangent.xyz = vec3(dot(mr0, interpolatedTangent.xyz), dot(mr1, interpolatedTangent.xyz), dot(mr2, interpolatedTangent.xyz));\n }\n\n\n _normalWS = interpolatedNormal.xyz;\n _tangentWS = interpolatedTangent.xyz;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_normal_map_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return 
mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, 
vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 0) in vec4 _positionES;\nlayout(location = 1) in vec4 _positionWS;\nlayout(location = 2) in vec2 _texCoord0;\nlayout(location = 3) in vec2 _texCoord1;\nlayout(location = 4) in vec3 _normalWS;\nlayout(location = 5) in vec3 _tangentWS;\nlayout(location = 6) in vec3 _color;\n\nvoid main(void) {\n vec3 fadeEmissive;\n 
FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTex = (((matKey & NORMAL_MAP_BIT) != 0) ? fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragNormalWS;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTex, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormalWS = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n\n packDeferredFragment(\n normalize(fragNormalWS.xyz),\n opacity,\n albedo,\n roughness,\n metallic,\n emissive + fadeEmissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "4zhj1JuoZvGR1LCXTOVenA==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// simple.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 9/15/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\n// the interpolated normal\nout vec3 _normalWS;\nout vec3 _normalMS;\nout vec4 _color;\nout vec2 _texCoord0;\nout vec4 _positionMS;\nout vec4 _positionES;\n\nvoid main(void) {\n _color = color_sRGBAToLinear(inColor);\n _texCoord0 = inTexCoord0.st;\n _positionMS = inPosition;\n _normalMS = inNormal.xyz;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = 
obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// simple_textured.frag\n// fragment shader\n//\n// Created by Clement Brisset on 5/29/15.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, 
FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = 
surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// the albedo texture\nuniform sampler2D originalTexture;\n\n// the interpolated normal\nin vec3 _normalWS;\nin vec4 _color;\nin vec2 _texCoord0;\n\nvoid main(void) {\n vec4 texel = texture(originalTexture, _texCoord0);\n\n packDeferredFragment(\n normalize(_normalWS),\n 1.0,\n _color.rgb * texel.rgb,\n DEFAULT_ROUGHNESS,\n DEFAULT_METALLIC,\n DEFAULT_EMISSIVE,\n DEFAULT_OCCLUSION,\n DEFAULT_SCATTERING);\n}\n\n"
+ },
+ "58OO8MGSlS2k1Qn10mamwQ==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid 
skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec3 interpolatedNormal = vec3(0.0, 0.0, 0.0);\n\n skinPositionNormal(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, position, interpolatedNormal);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n\n\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_translucent.frag\n// fragment shader\n//\n// Created by Sam Gateau on 2/15/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or 
http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too 
far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef 
SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n\n\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n 
float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return 
specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in 
world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\nuniform lightBuffer {\n Light lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = 
vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return 
frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? 
-eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? 
(element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n\n\n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float 
radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 
_textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec4 _positionWS;\nin vec3 _normalWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n\n\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? 
emissiveTex : emissive);\n}\n;\n\n vec3 fragPositionES = _positionES.xyz;\n vec3 fragPositionWS = _positionWS.xyz;\n // Lighting is done in world space\n vec3 fragNormalWS = normalize(_normalWS);\n\n TransformCamera cam = getTransformCamera();\n vec3 fragToEyeWS = cam._viewInverse[3].xyz - fragPositionWS;\n vec3 fragToEyeDirWS = normalize(fragToEyeWS);\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragToEyeDirWS);\n\n vec4 localLighting = vec4(0.0);\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(_positionWS);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (hasLocalLights(numLights, clusterPos, dims)) {\n localLighting = evalLocalLighting(cluster, numLights, fragPositionWS, surfaceWS,\n metallic, fresnel, albedo, 0.0,\n vec4(0), vec4(0), opacity);\n }\n\n _fragColor = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPositionES,\n\t\tfragPositionWS,\n albedo,\n fresnel,\n metallic,\n emissive,\n surfaceWS, opacity, localLighting.rgb),\n opacity);\n}\n\n\n"
+ },
+ "5bcd/ODHdGbVBs/F+T2X8A==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/14/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec2 _texCoord0;\nlayout(location = 2) out vec2 _texCoord1;\nlayout(location = 3) out vec3 _normalWS;\nlayout(location = 4) out vec3 _color;\nlayout(location = 5) out float _alpha;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n 
\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// material_opaque_unlit.frag\n// fragment shader\n//\n// Created by Sam Gateau on 5/5/2016.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * 
FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float 
metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\n#endif\n\n\n\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _color;\nlayout(location = 5) in float _alpha;\n\nvoid main(void) {\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n {\n if (opacity < 1.0) {\n discard;\n }\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n packDeferredFragmentUnlit(\n normalize(_normalWS), \n opacity,\n albedo * isUnlitEnabled());\n}\n\n\n"
+ },
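
The fragment shaders in these cache entries all pack G-buffer normals with the same octahedral scheme (packNormal via float32x3_to_oct and snorm12x2_to_unorm8x3, unpacked by the matching inverse functions). The C++ sketch below is only an illustration of that encode/decode round trip under the same math as the GLSL above; the float2/float3 helper types and function names are mine, not part of the shader cache or the engine API.

    // Illustrative C++ round trip of the octahedral normal mapping used by
    // packNormal/unpackNormal in the generated shaders (12-bit quantization omitted).
    #include <cmath>
    #include <cstdio>

    struct float2 { float x, y; };
    struct float3 { float x, y, z; };

    static float signNotZero(float v) { return v >= 0.0f ? 1.0f : -1.0f; }

    // Project a unit vector onto the octahedron and fold the lower hemisphere
    // into the unit square (mirrors GLSL float32x3_to_oct).
    static float2 octEncode(float3 n) {
        float invL1 = 1.0f / (std::fabs(n.x) + std::fabs(n.y) + std::fabs(n.z));
        float2 p { n.x * invL1, n.y * invL1 };
        if (n.z <= 0.0f) {
            p = { (1.0f - std::fabs(p.y)) * signNotZero(p.x),
                  (1.0f - std::fabs(p.x)) * signNotZero(p.y) };
        }
        return p;
    }

    // Unfold back to a 3D direction and renormalize (mirrors GLSL oct_to_float32x3).
    static float3 octDecode(float2 e) {
        float3 v { e.x, e.y, 1.0f - std::fabs(e.x) - std::fabs(e.y) };
        if (v.z < 0.0f) {
            float ox = v.x, oy = v.y;
            v.x = (1.0f - std::fabs(oy)) * signNotZero(ox);
            v.y = (1.0f - std::fabs(ox)) * signNotZero(oy);
        }
        float len = std::sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
        return { v.x / len, v.y / len, v.z / len };
    }

    int main() {
        float3 n { 0.36f, -0.48f, 0.80f };      // already unit length
        float3 r = octDecode(octEncode(n));     // round trip through the 2D octahedral domain
        std::printf("in  % .4f % .4f % .4f\n", n.x, n.y, n.z);
        std::printf("out % .4f % .4f % .4f\n", r.x, r.y, r.z);
        return 0;
    }

In the shaders the intermediate vec2 is additionally quantized to two 12-bit snorm values and stored across three 8-bit G-buffer channels (snorm12x2_to_unorm8x3), which is where the small reconstruction error comes from; the mapping itself is lossless up to floating-point precision, as the sketch shows.
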
+ "5iiY0vIxLsCSpgiFabIarw==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 04/24/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec4 _positionWS;\nlayout(location = 2) out vec2 _texCoord0;\nlayout(location = 3) out vec2 _texCoord1;\nlayout(location = 4) out vec3 _normalWS;\nlayout(location = 5) out vec3 _color;\nlayout(location = 6) out float _alpha;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = 
vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// model_unlit_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= 
FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n 
float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly 
lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# 
define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n 
edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\n#endif\n\n\n\nlayout(location = 1) in vec4 _positionWS;\nlayout(location = 2) in vec2 _texCoord0;\nlayout(location = 4) in vec3 _normalWS;\nlayout(location = 5) in vec3 _color;\nlayout(location = 6) in float _alpha;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n {\n if (opacity < 1.0) {\n discard;\n }\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n albedo += fadeEmissive;\n packDeferredFragmentUnlit(\n normalize(_normalWS), \n opacity,\n albedo * isUnlitEnabled());\n}\n\n\n"
+ },
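
A second pattern shared by these generated fragment shaders is the mode/metallic packing: the deferred pipeline stores the lighting mode and the metallic value in a single [0,1] channel, using 0.1-wide bands (shaded 0.0-0.1, lightmapped 0.2-0.3, scattering 0.4-0.5, unlit pinned at 0.6), as defined by the FRAG_PACK_* constants above. The following C++ mirror of packShadedMetallic/unpackModeMetallic is a sketch for clarity only; the constant and function names follow the GLSL, while the main() example values are hypothetical.

    // Illustrative C++ mirror of the single-channel mode/metallic packing used by
    // packDeferredFragment and unpackModeMetallic in the generated shaders.
    #include <algorithm>
    #include <cstdio>

    enum FragMode { FRAG_MODE_UNLIT = 0, FRAG_MODE_SHADED = 1,
                    FRAG_MODE_LIGHTMAPPED = 2, FRAG_MODE_SCATTERING = 3 };

    constexpr float SHADED_NON_METALLIC      = 0.0f, SHADED_METALLIC      = 0.1f;
    constexpr float LIGHTMAPPED_NON_METALLIC = 0.2f, LIGHTMAPPED_METALLIC = 0.3f;
    constexpr float SCATTERING_NON_METALLIC  = 0.4f, SCATTERING_METALLIC  = 0.5f;
    constexpr float UNLIT                    = 0.6f;

    // mix(a, b, t) = a + (b - a) * t: metallic in [0,1] lands inside the shaded band.
    float packShadedMetallic(float metallic) {
        return SHADED_NON_METALLIC + (SHADED_METALLIC - SHADED_NON_METALLIC) * metallic;
    }

    // Recover the mode from the band and rescale the in-band position back to metallic.
    void unpackModeMetallic(float raw, int& mode, float& metallic) {
        if (raw <= SHADED_METALLIC) {
            mode = FRAG_MODE_SHADED;
            metallic = std::clamp((raw - SHADED_NON_METALLIC) / (SHADED_METALLIC - SHADED_NON_METALLIC), 0.0f, 1.0f);
        } else if (raw <= LIGHTMAPPED_METALLIC) {
            mode = FRAG_MODE_LIGHTMAPPED;
            metallic = std::clamp((raw - LIGHTMAPPED_NON_METALLIC) / (LIGHTMAPPED_METALLIC - LIGHTMAPPED_NON_METALLIC), 0.0f, 1.0f);
        } else if (raw <= SCATTERING_METALLIC) {
            mode = FRAG_MODE_SCATTERING;
            metallic = std::clamp((raw - SCATTERING_NON_METALLIC) / (SCATTERING_METALLIC - SCATTERING_NON_METALLIC), 0.0f, 1.0f);
        } else if (raw >= UNLIT) {
            mode = FRAG_MODE_UNLIT;
            metallic = 0.0f;
        }
    }

    int main() {
        int mode = -1; float metallic = 0.0f;
        float raw = packShadedMetallic(0.25f);        // 0.025, i.e. inside the shaded band
        unpackModeMetallic(raw, mode, metallic);
        std::printf("mode=%d metallic=%.2f\n", mode, metallic);  // expected: mode=1 metallic=0.25
        return 0;
    }

This is also why packDeferredFragmentUnlit above writes packUnlit() (0.6) into the albedo target's alpha: the lighting pass only needs to read that one channel to know the fragment should skip shading entirely.
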
+ "5l+NrW5M5EYQwM35VGiwnQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// simple.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 9/15/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\n// the interpolated normal\nout vec3 _normalWS;\nout vec3 _normalMS;\nout vec4 _color;\nout vec2 _texCoord0;\nout vec4 _positionMS;\nout vec4 _positionES;\n\nvoid main(void) {\n _color = color_sRGBAToLinear(inColor);\n _texCoord0 = inTexCoord0.st;\n _positionMS = inPosition;\n _normalMS = inNormal.xyz;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = 
obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// forward_simple_textured_transparent.frag\n// fragment shader\n//\n// Created by Clement Brisset on 5/29/15.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High 
Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n 
return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n\n\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout 
SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n\n\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the 
accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is 
not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we 
require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n\n\n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlended(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal, vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) {\n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normal);\n vec3 fragPositionWS = vec3(invViewMat * vec4(position, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n 
evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normal);\n vec3 fragPositionWS = vec3(invViewMat * vec4(position, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n // FIXME - temporarily removed until we support it for forward...\n /* if ((hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }*/\n\n return color;\n}\n\n\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode 
Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n// the albedo texture\nuniform sampler2D originalTexture;\n\n// the interpolated normal\nlayout(location = 0) in vec3 _normalWS;\nlayout(location = 1) in vec4 _color;\nlayout(location = 2) in vec2 _texCoord0;\nlayout(location = 3) in vec4 _positionES;\n\nlayout(location = 0) out vec4 _fragColor0;\n\nvoid main(void) {\n vec4 texel = texture(originalTexture, _texCoord0);\n float colorAlpha = _color.a * texel.a;\n\n TransformCamera cam = getTransformCamera();\n vec3 fragPosition = _positionES.xyz;\n\n _fragColor0 = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n DEFAULT_OCCLUSION,\n fragPosition,\n normalize(_normalWS),\n _color.rgb * texel.rgb,\n DEFAULT_FRESNEL,\n DEFAULT_METALLIC,\n DEFAULT_EMISSIVE,\n DEFAULT_ROUGHNESS, colorAlpha),\n colorAlpha);\n}\n\n"
+ },
+ "6jeBT9ZR8yCpxEdAcd36xA==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// deferred_light.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/16/16.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec2 _texCoord0;\n\nuniform vec4 texcoordFrameTransform;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n _texCoord0 = (pos.xy + 1.0) * 0.5;\n\n _texCoord0 *= texcoordFrameTransform.zw;\n _texCoord0 += texcoordFrameTransform.xy;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// local_lights_drawOutline.frag\n// fragment shader\n//\n// Created by Sam Gateau on 9/6/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about deferred buffer\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - 
FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\n// the albedo texture\nuniform sampler2D albedoMap;\n\n// the normal texture\nuniform sampler2D normalMap;\n\n// the specular texture\nuniform sampler2D specularMap;\n\n// the depth texture\nuniform sampler2D depthMap;\nuniform sampler2D linearZeyeMap;\n\n// the obscurance texture\nuniform sampler2D obscuranceMap;\n\n// the lighting texture\nuniform sampler2D lightingMap;\n\n\nstruct DeferredFragment {\n vec4 position;\n vec3 normal;\n float metallic;\n vec3 albedo;\n float obscurance;\n vec3 fresnel;\n float roughness;\n int mode;\n float scattering;\n float depthVal;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nDeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n vec4 specularVal;\n \n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n specularVal = texture(specularMap, texcoord);\n frag.obscurance = texture(obscuranceMap, texcoord).x;\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n frag.obscurance = min(specularVal.w, frag.obscurance);\n\n if (frag.mode == FRAG_MODE_SCATTERING) {\n frag.scattering = specularVal.x;\n }\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nDeferredFragment unpackDeferredFragmentNoPositionNoAmbient(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n\n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n //frag.emissive = specularVal.xyz;\n frag.obscurance = 1.0;\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 
_prevView;\n mat4 _prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? 
ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\n\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\nvec4 unpackDeferredPosition(float depthValue, vec2 texcoord) {\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n\n return vec4(evalEyePositionFromZdb(side, depthValue, texcoord), 1.0);\n}\n\n// This method to unpack position is fastesst\nvec4 unpackDeferredPositionFromZdb(vec2 texcoord) {\n float Zdb = texture(depthMap, texcoord).x;\n\treturn unpackDeferredPosition(Zdb, texcoord);\n}\n\nvec4 unpackDeferredPositionFromZeye(vec2 texcoord) {\n float Zeye = -texture(linearZeyeMap, texcoord).x;\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0);\n}\n\nDeferredFragment unpackDeferredFragment(DeferredFrameTransform deferredTransform, vec2 texcoord) {\n\n float depthValue = texture(depthMap, texcoord).r;\n\n DeferredFragment frag = unpackDeferredFragmentNoPosition(texcoord);\n\n frag.depthVal = depthValue;\n frag.position = unpackDeferredPosition(frag.depthVal, texcoord);\n\n return frag;\n}\n\n\n\n// the curvature texture\nuniform sampler2D curvatureMap;\n\nvec4 fetchCurvature(vec2 texcoord) {\n return texture(curvatureMap, texcoord);\n}\n\n// the curvature texture\nuniform sampler2D diffusedCurvatureMap;\n\nvec4 fetchDiffusedCurvature(vec2 texcoord) {\n return texture(diffusedCurvatureMap, texcoord);\n}\n\nvoid unpackMidLowNormalCurvature(vec2 texcoord, out vec4 midNormalCurvature, out vec4 lowNormalCurvature) {\n midNormalCurvature = fetchCurvature(texcoord);\n lowNormalCurvature = fetchDiffusedCurvature(texcoord);\n midNormalCurvature.xyz = normalize((midNormalCurvature.xyz - 0.5f) * 2.0f);\n lowNormalCurvature.xyz = normalize((lowNormalCurvature.xyz - 0.5f) * 2.0f);\n midNormalCurvature.w = (midNormalCurvature.w * 2.0 - 1.0);\n lowNormalCurvature.w = (lowNormalCurvature.w * 2.0 - 1.0);\n}\n\n\n// 
Everything about light\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; 
}\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nuniform lightBuffer {\n Light lightArray[128];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n\n\n vec4 
_Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * 
base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n\n\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nbool evalLightPointEdge(out vec3 color, Light light, vec4 fragLightDirLen, vec3 fragEyeDir) {\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n // Show edges\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n color = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n\n return (edge < 1.0);\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nbool evalLightSpotEdge(out vec3 color, Light light, vec4 fragLightDirLen, float cosSpotAngle, vec3 fragEyeDir) {\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n \n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), 
-lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n color = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n\n return (edge < 1.0);\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return 
volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? -eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n\n\n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? 
frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? (element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nin vec2 _texCoord0;\nout vec4 _fragColor;\n\nvoid main(void) {\n\n // Grab the fragment data from the uv\n vec2 texCoord = _texCoord0.st;\n\n vec4 fragPosition = unpackDeferredPositionFromZeye(texCoord);\n DeferredFragment frag = unpackDeferredFragmentNoPosition(texCoord);\n\n if (frag.mode == FRAG_MODE_UNLIT) {\n discard;\n }\n\n frag.position = fragPosition;\n\n\n // Frag pos in world\n mat4 invViewMat = getViewInverse();\n vec4 fragPos = invViewMat * fragPosition;\n \n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(fragPos);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (!hasLocalLights(numLights, clusterPos, dims)) {\n discard;\n }\n\n // Frag to eye vec\n vec4 fragEyeVector = invViewMat * vec4(-frag.position.xyz, 0.0);\n vec3 fragEyeDir = normalize(fragEyeVector.xyz);\n _fragColor = vec4(0, 0, 0, 1);\n \n int numLightTouching = 0;\n int lightClusterOffset = cluster.z;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in 
the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(frag.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n vec3 color = vec3(0.0);\n if (evalLightPointEdge(color, light, fragLightDirLen, fragEyeDir)) {\n _fragColor.rgb += color;\n }\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(frag.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 color = vec3(0.0);\n\n if (evalLightSpotEdge(color, light, fragLightDirLen, cosSpotAngle, fragEyeDir)) {\n _fragColor.rgb += color;\n }\n }\n\n}\n\n\n\n"
+ },
+ "6m/vijX36wYDyY83TmJybg==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawViewportQuatTransformTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] filling in \n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = 
texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, 0.0, 1.0),\n vec4(1.0, -1.0, 0.0, 1.0),\n vec4(-1.0, 1.0, 0.0, 1.0),\n vec4(1.0, 1.0, 0.0, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n // standard transform but applied to the Texcoord\n vec4 tc = vec4((pos.xy + 1.0) * 0.5, pos.zw);\n\n TransformObject obj = getTransformObject();\n { // transformModelToWorldPos\n tc = (obj._model * tc);\n }\n\n\n gl_Position = pos;\n varTexCoord0 = tc.xy;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Sat Oct 24 09:34:37 2015\n//\n// toneMapping.frag\n//\n// Draw texture 0 fetched at texcoord.xy\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nstruct ToneMappingParams {\n vec4 _exp_2powExp_s0_s1;\n ivec4 _toneCurve_s0_s1_s2;\n};\n\nconst float INV_GAMMA_22 = 1.0 / 2.2;\nconst int ToneCurveNone = 0;\nconst int ToneCurveGamma22 = 1;\nconst int ToneCurveReinhard = 2;\nconst int ToneCurveFilmic = 3;\n\nuniform toneMappingParamsBuffer {\n ToneMappingParams params;\n};\nfloat getTwoPowExposure() {\n return params._exp_2powExp_s0_s1.y;\n}\nint getToneCurve() {\n return params._toneCurve_s0_s1_s2.x;\n}\n\nuniform sampler2D colorMap;\n \nin vec2 varTexCoord0;\nout vec4 outFragColor;\n \nvoid main(void) {\n vec4 fragColorRaw = texture(colorMap, varTexCoord0);\n vec3 fragColor = fragColorRaw.xyz;\n\n vec3 srcColor = fragColor * getTwoPowExposure();\n\n int toneCurve = getToneCurve();\n vec3 tonedColor = srcColor;\n if (toneCurve == ToneCurveFilmic) {\n vec3 x = max(vec3(0.0), srcColor-0.004);\n tonedColor = (x * (6.2 * x + 0.5)) / (x * (6.2 * x + 1.7) + 0.06);\n } else if (toneCurve == ToneCurveReinhard) {\n tonedColor = srcColor/(1.0 + srcColor);\n tonedColor = pow(tonedColor, vec3(INV_GAMMA_22));\n } else if (toneCurve == ToneCurveGamma22) {\n tonedColor = pow(srcColor, vec3(INV_GAMMA_22));\n } // else None toned = src\n\n outFragColor = vec4(tonedColor, 1.0);\n}\n\n\n"
+ },
+ "7FNQjpsWLg0f0vQMLCi6Wg==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// fxaa_blend.frag\n// fragment shader\n//\n// Created by Raffi Bedikian on 8/30/15\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - 
FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 
normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n 
// we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\nin vec2 varTexCoord0;\nout vec4 outFragColor;\n\nuniform sampler2D colorTexture;\nuniform float sharpenIntensity;\n\nvoid main(void) {\n vec4 pixels[9];\n vec4 sharpenedPixel;\n pixels[0] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy)+ivec2(-1,-1), 0);\n pixels[1] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy)+ivec2(0,-1), 0);\n pixels[2] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy)+ivec2(1,-1), 0);\n\n pixels[3] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy)+ivec2(-1,0), 0);\n pixels[4] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy), 0);\n pixels[5] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy)+ivec2(1,0), 0);\n\n pixels[6] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy)+ivec2(-1,1), 0);\n\n\n pixels[7] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy)+ivec2(0,1), 0);\n pixels[8] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy)+ivec2(1,1), 0);\n\n sharpenedPixel = pixels[4]*6.8 - (pixels[1]+pixels[3]+pixels[5]+pixels[7]) - (pixels[0]+pixels[2]+pixels[6]+pixels[8])*0.7;\n\n\tvec4 minColor = max(vec4(0), pixels[4]-vec4(0.5));\n\tvec4 maxColor = pixels[4]+vec4(0.5);\n outFragColor = clamp(pixels[4] + sharpenedPixel * sharpenIntensity, minColor, maxColor);\n}\n\n\n"
+ },
+ "7IV5hopxBctOtJIAvRJeZw==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model_translucent_normal_map.vert\n// vertex shader\n//\n// Created by Olivier Prat on 23/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout float _alpha;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec4 _positionES;\nout vec4 _positionWS;\nout vec3 _normalWS;\nout vec3 _tangentWS;\nout vec3 _color;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef 
GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _tangentWS = vec3(dot(mr0, inTangent.xyz), dot(mr1, inTangent.xyz), dot(mr2, inTangent.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_translucent_normal_map.frag\n// fragment shader\n//\n// Created by Olivier Prat on 23/01/2018.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High 
Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, 
float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return 
lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n\n\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See 
https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return 
specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in 
world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\nuniform lightBuffer {\n Light lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = 
vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return 
frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? 
-eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? 
(element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n\n\n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float 
radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 
_textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec4 _positionWS;\nin vec3 _normalWS;\nin vec3 _tangentWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n Material mat = getMaterial();\n int matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\n\n\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTex = (((matKey & NORMAL_MAP_BIT) != 0) ? fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? 
albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragPositionES = _positionES.xyz;\n vec3 fragPositionWS = _positionWS.xyz;\n // Lighting is done in world space\n vec3 fragNormalWS;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTex, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormalWS = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n TransformCamera cam = getTransformCamera();\n vec3 fragToEyeWS = cam._viewInverse[3].xyz - fragPositionWS;\n vec3 fragToEyeDirWS = normalize(fragToEyeWS);\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragToEyeDirWS);\n\n vec4 localLighting = vec4(0.0);\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(_positionWS);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (hasLocalLights(numLights, clusterPos, dims)) {\n localLighting = evalLocalLighting(cluster, numLights, fragPositionWS, surfaceWS,\n metallic, fresnel, albedo, 0.0,\n vec4(0), vec4(0), opacity);\n }\n\n _fragColor = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPositionES,\n\t\tfragPositionWS,\n albedo,\n fresnel,\n metallic,\n emissive,\n surfaceWS, opacity, localLighting.rgb),\n opacity);\n}\n\n\n"
+ },
+ "7bsWJwqr6TCCCD7GT/odNw==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// simple_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/04/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\n// Generated on Wed May 23 14:24:07 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nout vec4 _fadeData1;\nout vec4 _fadeData2;\nout vec4 _fadeData3;\n\n\n// the interpolated normal\nout vec3 _normalWS;\nout vec3 _normalMS;\nout vec4 _color;\nout vec2 _texCoord0;\nout vec4 _positionMS;\nout vec4 _positionES;\nout vec4 _positionWS;\n\nvoid main(void) {\n _color = color_sRGBAToLinear(inColor);\n _texCoord0 = inTexCoord0.st;\n _positionMS = inPosition;\n _normalMS = inNormal.xyz;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = 
uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n _fadeData1 = inTexCoord2;\n _fadeData2 = inTexCoord3;\n _fadeData3 = inTexCoord4; \n\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// simple_textured_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? 
((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 
0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = 
dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n\n\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n\n\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n// the albedo texture\nuniform sampler2D originalTexture;\n\n// the interpolated normal\nin vec3 _normalWS;\nin vec4 _color;\nin vec2 _texCoord0;\nin vec4 _positionWS;\n\n// Declare after all samplers to prevent sampler location mix up with originalTexture\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nin vec4 _fadeData1;\nin vec4 _fadeData2;\nin vec4 _fadeData3;\n\n\n\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = int(_fadeData1.w);\n fadeParams.threshold = _fadeData2.w;\n fadeParams.noiseOffset = _fadeData1.xyz;\n fadeParams.baseOffset = _fadeData2.xyz;\n fadeParams.baseInvSize = _fadeData3.xyz;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n vec4 texel = texture(originalTexture, _texCoord0);\n float colorAlpha = _color.a;\n if 
(_color.a <= 0.0) {\n texel = color_sRGBAToLinear(texel);\n colorAlpha = -_color.a;\n }\n\n const float ALPHA_THRESHOLD = 0.999;\n if (colorAlpha * texel.a < ALPHA_THRESHOLD) {\n packDeferredFragmentTranslucent(\n normalize(_normalWS),\n colorAlpha * texel.a,\n _color.rgb * texel.rgb + fadeEmissive,\n DEFAULT_FRESNEL,\n DEFAULT_ROUGHNESS);\n } else {\n packDeferredFragment(\n normalize(_normalWS),\n 1.0,\n _color.rgb * texel.rgb,\n DEFAULT_ROUGHNESS,\n DEFAULT_METALLIC,\n DEFAULT_EMISSIVE + fadeEmissive,\n DEFAULT_OCCLUSION,\n DEFAULT_SCATTERING);\n }\n}\n\n"
+ },
+ "7clizhBrq2Q7sao4iat75A==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_normal_map.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/29/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid 
skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _tangentWS;\nout vec3 _color;\nout float _alpha;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n skinPositionNormalTangent(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, inTangent.xyz, position, interpolatedNormal.xyz, interpolatedTangent.xyz);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n interpolatedNormal = vec4(normalize(interpolatedNormal.xyz), 0.0);\n interpolatedTangent = vec4(normalize(interpolatedTangent.xyz), 0.0);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n\n\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedNormal.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedTangent.xyz = 
vec3(dot(mr0, interpolatedTangent.xyz), dot(mr1, interpolatedTangent.xyz), dot(mr2, interpolatedTangent.xyz));\n }\n\n\n _normalWS = interpolatedNormal.xyz;\n _tangentWS = interpolatedTangent.xyz;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_normal_map.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 5/6/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat 
packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = 
normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#define scatteringMap 6\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(tableTex(matTex, scatteringMap), uv).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return tableTexValue(matTex, scatteringMap, uv).r; // boolean scattering for now\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\nuniform sampler2D scatteringMap;\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(scatteringMap, uv, TAA_TEXTURE_LOD_BIAS).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return texture(scatteringMap, uv).r; // boolean scattering for now\n}\n#endif\n\n\n\nlayout(location = 0) in vec4 _positionES;\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 2) in vec2 _texCoord1;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _tangentWS;\nlayout(location = 5) in vec3 _color;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTex = (((matKey & NORMAL_MAP_BIT) != 0) ? 
fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\nfloat scatteringTex = (((matKey & SCATTERING_MAP_BIT) != 0) ? fetchScatteringMap(_texCoord0) : 0.0);\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragNormalWS;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTex, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormalWS = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n {\n scattering = (((matKey & SCATTERING_MAP_BIT) != 0) ? scatteringTex : scattering);\n}\n;\n\n packDeferredFragment(\n normalize(fragNormalWS.xyz),\n opacity,\n albedo,\n roughness,\n metallic,\n emissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "7rT0hub5yYofFRvlIiiWgw==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n\n// OpenGLDisplayPlugin_present.frag\n\nuniform sampler2D colorMap;\n\nin vec2 varTexCoord0;\n\nout vec4 outFragColor;\n\nfloat sRGBFloatToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n\n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 colorToLinearRGB(vec3 srgb) {\n return vec3(sRGBFloatToLinear(srgb.r), sRGBFloatToLinear(srgb.g), sRGBFloatToLinear(srgb.b));\n}\n\nvoid main(void) {\n outFragColor.a = 1.0;\n outFragColor.rgb = colorToLinearRGB(texture(colorMap, varTexCoord0).rgb);\n}\n\n"
+ },
+ "7wSgkilDDLl77uiwvnvqbA==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// simple_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/04/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\n// Generated on Wed May 23 14:24:07 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nout vec4 _fadeData1;\nout vec4 _fadeData2;\nout vec4 _fadeData3;\n\n\n// the interpolated normal\nout vec3 _normalWS;\nout vec3 _normalMS;\nout vec4 _color;\nout vec2 _texCoord0;\nout vec4 _positionMS;\nout vec4 _positionES;\nout vec4 _positionWS;\n\nvoid main(void) {\n _color = color_sRGBAToLinear(inColor);\n _texCoord0 = inTexCoord0.st;\n _positionMS = inPosition;\n _normalMS = inNormal.xyz;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = 
uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n _fadeData1 = inTexCoord2;\n _fadeData2 = inTexCoord3;\n _fadeData3 = inTexCoord4; \n\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// simple_textured_unlit_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? 
((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 
0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = 
dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n\n\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n\n\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n// the albedo texture\nuniform sampler2D originalTexture;\n\n// the interpolated normal\nin vec3 _normalWS;\nin vec4 _color;\nin vec2 _texCoord0;\nin vec4 _positionWS;\n\n// Declare after all samplers to prevent sampler location mix up with originalTexture\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nin vec4 _fadeData1;\nin vec4 _fadeData2;\nin vec4 _fadeData3;\n\n\n\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = int(_fadeData1.w);\n fadeParams.threshold = _fadeData2.w;\n fadeParams.noiseOffset = _fadeData1.xyz;\n fadeParams.baseOffset = _fadeData2.xyz;\n fadeParams.baseInvSize = _fadeData3.xyz;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n vec4 texel = texture(originalTexture, _texCoord0.st);\n float colorAlpha = _color.a;\n 
if (_color.a <= 0.0) {\n texel = color_sRGBAToLinear(texel);\n colorAlpha = -_color.a;\n }\n\n const float ALPHA_THRESHOLD = 0.999;\n if (colorAlpha * texel.a < ALPHA_THRESHOLD) {\n packDeferredFragmentTranslucent(\n normalize(_normalWS),\n colorAlpha * texel.a,\n _color.rgb * texel.rgb+fadeEmissive,\n DEFAULT_FRESNEL,\n DEFAULT_ROUGHNESS);\n } else {\n packDeferredFragmentUnlit(\n normalize(_normalWS),\n 1.0,\n _color.rgb * texel.rgb+fadeEmissive);\n }\n}\n\n"
+ },
+ "9UNlgMJxCPmCgXyqfgXC/w==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// skin_model_shadow_fade_dq.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer 
transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 
4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n}\n\n\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = 
length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n skinPosition(inSkinClusterIndex, inSkinClusterWeight, inPosition, position);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_shadow_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 0) in vec4 _positionWS;\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = 
fadeBaseInvSize;\n\n applyFadeClip(fadeParams, _positionWS.xyz);\n\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
+ "9bKFfOQuaMZsiHJwa80zAA==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/14/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid skinPositionNormalTangent(ivec4 
skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _color;\nout float _alpha;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec3 interpolatedNormal = vec3(0.0, 0.0, 0.0);\n\n skinPositionNormal(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, position, interpolatedNormal);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n\n\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_specular_map.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 5/6/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? 
+1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat 
evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n 
surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#define scatteringMap 6\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(tableTex(matTex, scatteringMap), uv).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return tableTexValue(matTex, scatteringMap, uv).r; // boolean scattering for now\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\nuniform sampler2D scatteringMap;\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(scatteringMap, uv, TAA_TEXTURE_LOD_BIAS).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return texture(scatteringMap, uv).r; // boolean scattering for now\n}\n#endif\n\n\n\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 2) in vec2 _texCoord1;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _color;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\nfloat scatteringTex = (((matKey & SCATTERING_MAP_BIT) != 0) ? fetchScatteringMap(_texCoord0) : 0.0);\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? 
step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n {\n if (opacity < 1.0) {\n discard;\n }\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n {\n scattering = (((matKey & SCATTERING_MAP_BIT) != 0) ? scatteringTex : scattering);\n}\n;\n\n packDeferredFragment(\n normalize(_normalWS), \n opacity,\n albedo,\n roughness,\n metallic,\n emissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "9nR6MFNx+/46oTyCg6ThIQ==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_shadow_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer 
transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n skinPosition(inSkinClusterIndex, inSkinClusterWeight, inPosition, position);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // 
_transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_shadow_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 0) in vec4 _positionWS;\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = 
fadeBaseInvSize;\n\n applyFadeClip(fadeParams, _positionWS.xyz);\n\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
+ "9x28Xrw/xEV/kVPwTv9CHA==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawTransformUnitQuad.vert\n// \n// Draw and transform the unit quad [-1,-1 -> 1,1]\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = 
texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, 0.0, 1.0),\n vec4(1.0, -1.0, 0.0, 1.0),\n vec4(-1.0, 1.0, 0.0, 1.0),\n vec4(1.0, 1.0, 0.0, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * pos);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawTexture.frag\n//\n// Draw texture 0 fetched at texcoord.xy\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\nuniform sampler2D colorMap;\n\nlayout(location = 0) in vec2 varTexCoord0;\nlayout(location = 0) out vec4 outFragColor;\n\nvoid main(void) {\n outFragColor = texture(colorMap, varTexCoord0);\n}\n\n\n"
+ },
+ "ACXHB/kbeWO+idEVbQjvtg==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// simple_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/04/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\n// Generated on Wed May 23 14:24:07 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nout vec4 _fadeData1;\nout vec4 _fadeData2;\nout vec4 _fadeData3;\n\n\n// the interpolated normal\nout vec3 _normalWS;\nout vec3 _normalMS;\nout vec4 _color;\nout vec2 _texCoord0;\nout vec4 _positionMS;\nout vec4 _positionES;\nout vec4 _positionWS;\n\nvoid main(void) {\n _color = color_sRGBAToLinear(inColor);\n _texCoord0 = inTexCoord0.st;\n _positionMS = inPosition;\n _normalMS = inNormal.xyz;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = 
uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n _fadeData1 = inTexCoord2;\n _fadeData2 = inTexCoord3;\n _fadeData3 = inTexCoord4; \n\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// simple_textured_unlit_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? 
((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 
0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = 
dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n\n\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n\n\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n// the albedo texture\nuniform sampler2D originalTexture;\n\n// the interpolated normal\nin vec3 _normalWS;\nin vec4 _color;\nin vec2 _texCoord0;\nin vec4 _positionWS;\n\n// Declare after all samplers to prevent sampler location mix up with originalTexture\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nin vec4 _fadeData1;\nin vec4 _fadeData2;\nin vec4 _fadeData3;\n\n\n\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = int(_fadeData1.w);\n fadeParams.threshold = _fadeData2.w;\n fadeParams.noiseOffset = _fadeData1.xyz;\n fadeParams.baseOffset = _fadeData2.xyz;\n fadeParams.baseInvSize = _fadeData3.xyz;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n vec4 texel = texture(originalTexture, _texCoord0.st);\n float colorAlpha = _color.a;\n 
if (_color.a <= 0.0) {\n texel = color_sRGBAToLinear(texel);\n colorAlpha = -_color.a;\n }\n\n const float ALPHA_THRESHOLD = 0.999;\n if (colorAlpha * texel.a < ALPHA_THRESHOLD) {\n packDeferredFragmentTranslucent(\n normalize(_normalWS),\n colorAlpha * texel.a,\n _color.rgb * texel.rgb+fadeEmissive,\n DEFAULT_FRESNEL,\n DEFAULT_ROUGHNESS);\n } else {\n packDeferredFragmentUnlit(\n normalize(_normalWS),\n 1.0,\n _color.rgb * texel.rgb+fadeEmissive);\n }\n}\n\n"
+ },
+ "BKlbpHIbPaLybLfXXSdJKA==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// deferred_light.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/16/16.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec2 _texCoord0;\n\nuniform vec4 texcoordFrameTransform;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n _texCoord0 = (pos.xy + 1.0) * 0.5;\n\n _texCoord0 *= texcoordFrameTransform.zw;\n _texCoord0 += texcoordFrameTransform.xy;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// local_lights_shading.frag\n// fragment shader\n//\n// Created by Sam Gateau on 9/6/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about deferred buffer\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - 
FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\n// the albedo texture\nuniform sampler2D albedoMap;\n\n// the normal texture\nuniform sampler2D normalMap;\n\n// the specular texture\nuniform sampler2D specularMap;\n\n// the depth texture\nuniform sampler2D depthMap;\nuniform sampler2D linearZeyeMap;\n\n// the obscurance texture\nuniform sampler2D obscuranceMap;\n\n// the lighting texture\nuniform sampler2D lightingMap;\n\n\nstruct DeferredFragment {\n vec4 position;\n vec3 normal;\n float metallic;\n vec3 albedo;\n float obscurance;\n vec3 fresnel;\n float roughness;\n int mode;\n float scattering;\n float depthVal;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nDeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n vec4 specularVal;\n \n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n specularVal = texture(specularMap, texcoord);\n frag.obscurance = texture(obscuranceMap, texcoord).x;\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n frag.obscurance = min(specularVal.w, frag.obscurance);\n\n if (frag.mode == FRAG_MODE_SCATTERING) {\n frag.scattering = specularVal.x;\n }\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nDeferredFragment unpackDeferredFragmentNoPositionNoAmbient(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n\n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n //frag.emissive = specularVal.xyz;\n frag.obscurance = 1.0;\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 
_prevView;\n mat4 _prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? 
ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\n\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\nvec4 unpackDeferredPosition(float depthValue, vec2 texcoord) {\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n\n return vec4(evalEyePositionFromZdb(side, depthValue, texcoord), 1.0);\n}\n\n// This method to unpack position is fastesst\nvec4 unpackDeferredPositionFromZdb(vec2 texcoord) {\n float Zdb = texture(depthMap, texcoord).x;\n\treturn unpackDeferredPosition(Zdb, texcoord);\n}\n\nvec4 unpackDeferredPositionFromZeye(vec2 texcoord) {\n float Zeye = -texture(linearZeyeMap, texcoord).x;\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0);\n}\n\nDeferredFragment unpackDeferredFragment(DeferredFrameTransform deferredTransform, vec2 texcoord) {\n\n float depthValue = texture(depthMap, texcoord).r;\n\n DeferredFragment frag = unpackDeferredFragmentNoPosition(texcoord);\n\n frag.depthVal = depthValue;\n frag.position = unpackDeferredPosition(frag.depthVal, texcoord);\n\n return frag;\n}\n\n\n\n// the curvature texture\nuniform sampler2D curvatureMap;\n\nvec4 fetchCurvature(vec2 texcoord) {\n return texture(curvatureMap, texcoord);\n}\n\n// the curvature texture\nuniform sampler2D diffusedCurvatureMap;\n\nvec4 fetchDiffusedCurvature(vec2 texcoord) {\n return texture(diffusedCurvatureMap, texcoord);\n}\n\nvoid unpackMidLowNormalCurvature(vec2 texcoord, out vec4 midNormalCurvature, out vec4 lowNormalCurvature) {\n midNormalCurvature = fetchCurvature(texcoord);\n lowNormalCurvature = fetchDiffusedCurvature(texcoord);\n midNormalCurvature.xyz = normalize((midNormalCurvature.xyz - 0.5f) * 2.0f);\n lowNormalCurvature.xyz = normalize((lowNormalCurvature.xyz - 0.5f) * 2.0f);\n midNormalCurvature.w = (midNormalCurvature.w * 2.0 - 1.0);\n lowNormalCurvature.w = (lowNormalCurvature.w * 2.0 - 1.0);\n}\n\n\n// 
Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 
lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nuniform lightBuffer {\n\n\n Light 
lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return 
vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n\n\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n 
updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ 
compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n\n\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > 
-frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? -eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? 
(element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float 
radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}in vec2 _texCoord0;\nout vec4 _fragColor;\n\nvoid main(void) {\n _fragColor = vec4(0.0);\n\n // Grab the fragment data from the uv\n vec2 texCoord = _texCoord0.st;\n\n DeferredFrameTransform deferredTransform = getDeferredFrameTransform();\n\n\n DeferredFragment frag = unpackDeferredFragment(deferredTransform, texCoord);\n vec4 fragPosition = frag.position;\n\n if (frag.mode == FRAG_MODE_UNLIT) {\n discard;\n }\n\n // Frag pos in world\n mat4 invViewMat = getViewInverse();\n vec4 fragWorldPos = invViewMat * fragPosition;\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(fragWorldPos);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (!hasLocalLights(numLights, clusterPos, dims)) {\n discard;\n }\n\n vec4 midNormalCurvature = vec4(0);\n vec4 lowNormalCurvature = vec4(0);\n if (frag.mode == FRAG_MODE_SCATTERING) {\n unpackMidLowNormalCurvature(texCoord, midNormalCurvature, lowNormalCurvature);\n }\n\n\n // Frag to eye vec\n vec4 fragEyeVector = invViewMat * vec4(-frag.position.xyz, 0.0);\n vec3 fragEyeDir = normalize(fragEyeVector.xyz);\n SurfaceData surface = initSurfaceData(frag.roughness, frag.normal, fragEyeDir);\n\n _fragColor = evalLocalLighting(cluster, numLights, fragWorldPos.xyz, surface, \n frag.metallic, frag.fresnel, frag.albedo, frag.scattering, \n midNormalCurvature, lowNormalCurvature, 1.0);\n\n}\n\n\n\n"
+ },
+ "BQ83agynx1cQM79Nsh7vsQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_normal_map_fade_dq.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/29/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual 
quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * 
vec4(inNormal, 0));\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _tangentWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n skinPositionNormalTangent(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, inTangent.xyz, position, interpolatedNormal.xyz, interpolatedTangent.xyz);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n interpolatedNormal = vec4(normalize(interpolatedNormal.xyz), 0.0);\n interpolatedTangent = vec4(normalize(interpolatedTangent.xyz), 0.0);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= 
cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedNormal.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedTangent.xyz = vec3(dot(mr0, interpolatedTangent.xyz), dot(mr1, interpolatedTangent.xyz), dot(mr2, interpolatedTangent.xyz));\n }\n\n\n _normalWS = interpolatedNormal.xyz;\n _tangentWS = interpolatedTangent.xyz;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_normal_map_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? 
((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 
0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = 
dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 0) in vec4 _positionES;\nlayout(location = 1) in vec4 _positionWS;\nlayout(location = 2) in vec2 _texCoord0;\nlayout(location = 3) in vec2 _texCoord1;\nlayout(location = 4) in vec3 _normalWS;\nlayout(location = 5) in vec3 _tangentWS;\nlayout(location = 6) in vec3 _color;\n\nvoid main(void) {\n vec3 fadeEmissive;\n 
FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTex = (((matKey & NORMAL_MAP_BIT) != 0) ? fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragNormalWS;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTex, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormalWS = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n\n packDeferredFragment(\n normalize(fragNormalWS.xyz),\n opacity,\n albedo,\n roughness,\n metallic,\n emissive + fadeEmissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "BjT/P1Q1JvAhSaNy0pqssQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_dq.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/14/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual 
quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * 
vec4(inNormal, 0));\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _color;\nout float _alpha;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec3 interpolatedNormal = vec3(0.0, 0.0, 0.0);\n\n skinPositionNormal(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, position, interpolatedNormal);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef 
GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_specular_map.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 5/6/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * 
FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float 
metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#define scatteringMap 6\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(tableTex(matTex, scatteringMap), uv).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return tableTexValue(matTex, scatteringMap, uv).r; // boolean scattering for now\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\nuniform sampler2D scatteringMap;\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(scatteringMap, uv, TAA_TEXTURE_LOD_BIAS).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return texture(scatteringMap, uv).r; // boolean scattering for now\n}\n#endif\n\n\n\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 2) in vec2 _texCoord1;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _color;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\nfloat scatteringTex = (((matKey & SCATTERING_MAP_BIT) != 0) ? fetchScatteringMap(_texCoord0) : 0.0);\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? 
step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n {\n if (opacity < 1.0) {\n discard;\n }\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n {\n scattering = (((matKey & SCATTERING_MAP_BIT) != 0) ? scatteringTex : scattering);\n}\n;\n\n packDeferredFragment(\n normalize(_normalWS), \n opacity,\n albedo,\n roughness,\n metallic,\n emissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "Bxa7+GW9Fehb86BlWiNoDQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// overlay3D.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/16/15.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nout vec3 _color;\nout float _alpha;\nout vec2 _texCoord0;\nout vec4 _positionES;\nout vec3 _normalWS;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n _texCoord0 = inTexCoord0.st;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, 
inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// overlay3D_unlit.frag\n// fragment shader\n//\n// Created by Zach Pomerantz on 2/2/2016.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nuniform sampler2D originalTexture;\n\nin vec2 _texCoord0;\nin vec3 _color;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec4 albedo = texture(originalTexture, _texCoord0);\n\n if (albedo.a <= 0.1) {\n discard;\n }\n vec4 color = vec4(albedo.rgb * _color, albedo.a);\n\n // Apply standard tone mapping\n _fragColor = vec4(pow(color.xyz, vec3(1.0 / 2.2)), color.w);\n}\n\n\n"
+ },
+ "C/+VdBtAvRrtzYuIEm4KIQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 04/24/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec4 _positionWS;\nlayout(location = 2) out vec2 _texCoord0;\nlayout(location = 3) out vec2 _texCoord1;\nlayout(location = 4) out vec3 _normalWS;\nlayout(location = 5) out vec3 _color;\nlayout(location = 6) out float _alpha;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = 
vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// model_translucent_unlit_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) 
float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 
metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n\n\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nin vec2 _texCoord0;\nin vec3 _color;\nin float _alpha;\nin vec4 _positionWS;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n 
fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n albedo += fadeEmissive;\n _fragColor = vec4(albedo * isUnlitEnabled(), opacity);\n}\n\n\n"
+ },
+ "CBCkoDtHdxwMHRDjxHBTgg==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawViewportQuatTransformTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] filling in \n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = 
texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, 0.0, 1.0),\n vec4(1.0, -1.0, 0.0, 1.0),\n vec4(-1.0, 1.0, 0.0, 1.0),\n vec4(1.0, 1.0, 0.0, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n // standard transform but applied to the Texcoord\n vec4 tc = vec4((pos.xy + 1.0) * 0.5, pos.zw);\n\n TransformObject obj = getTransformObject();\n { // transformModelToWorldPos\n tc = (obj._model * tc);\n }\n\n\n gl_Position = pos;\n varTexCoord0 = tc.xy;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// surfaceGeometry_downsampleDepthNormal.frag\n//\n// Created by Sam Gateau on 6/3/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\nuniform sampler2D linearDepthMap;\nuniform sampler2D normalMap;\n\nin vec2 varTexCoord0;\n\nlayout(location = 0) out vec4 outLinearDepth;\nlayout(location = 1) out vec4 outNormal;\n\nvoid main(void) {\n // Gather 2 by 2 quads from texture and downsample\n\n // Try different filters for Z\n vec4 Zeyes = textureGather(linearDepthMap, varTexCoord0, 0);\n // float Zeye = texture(linearDepthMap, varTexCoord0).x;\n\n vec4 rawNormalsX = textureGather(normalMap, varTexCoord0, 0);\n vec4 rawNormalsY = textureGather(normalMap, varTexCoord0, 1);\n vec4 rawNormalsZ = textureGather(normalMap, varTexCoord0, 2);\n\n float Zeye = min(min(Zeyes.x, Zeyes.y), min(Zeyes.z, Zeyes.w));\n\n vec3 normal = vec3(0.0);\n normal += unpackNormal(vec3(rawNormalsX[0], rawNormalsY[0], rawNormalsZ[0]));\n normal += unpackNormal(vec3(rawNormalsX[1], rawNormalsY[1], rawNormalsZ[1]));\n normal += 
unpackNormal(vec3(rawNormalsX[2], rawNormalsY[2], rawNormalsZ[2]));\n normal += unpackNormal(vec3(rawNormalsX[3], rawNormalsY[3], rawNormalsZ[3]));\n\n normal = normalize(normal);\n\n outLinearDepth = vec4(Zeye, 0.0, 0.0, 0.0);\n outNormal = vec4((normal + vec3(1.0)) * 0.5, 0.0);\n}\n\n\n\n"
+ },
+ "CKRCiOl6f0gS65/2JDZnNA==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:38 2018\n// skybox.vert\n// vertex shader\n//\n// Created by Sam Gateau on 5/5/2015.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = 
texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nlayout(location = 0) out vec3 _normal;\n\nvoid main(void) {\n const float depth = 0.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 inPosition = UNIT_QUAD[gl_VertexID];\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n vec3 clipDir = vec3(inPosition.xy, 0.0);\n vec3 eyeDir;\n { // transformClipToEyeDir\n eyeDir = vec3(cam._projectionInverse * vec4(clipDir.xyz, 1.0)); // Must be 1.0 here\n }\n\n { // transformEyeToWorldDir\n _normal = vec3(cam._viewInverse * vec4(eyeDir.xyz, 0.0));\n }\n\n \n // Position is supposed to come in clip space\n gl_Position = vec4(inPosition.xy, 0.0, 1.0);\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:23:38 2018\n// skybox.frag\n// fragment shader\n//\n// Created by Sam Gateau on 5/5/2015.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nuniform samplerCube cubeMap;\n\nstruct Skybox {\n vec4 color;\n};\n\nuniform skyboxBuffer {\n Skybox skybox;\n};\n\nlayout(location = 0) in vec3 _normal;\nlayout(location = 0) out vec4 _fragColor;\n\n//PROCEDURAL_COMMON_BLOCK\n\n#line 1001\n//PROCEDURAL_BLOCK\n\n#line 2033\nvoid main(void) {\n\n#ifdef PROCEDURAL\n\n vec3 color = getSkyboxColor();\n // Protect from NaNs and negative values\n color = mix(color, vec3(0), isnan(color));\n color = max(color, vec3(0));\n // Procedural Shaders are expected to be Gamma corrected so let's bring back the RGB in linear space for the rest of the pipeline\n color = pow(color, vec3(2.2));\n _fragColor = vec4(color, 0.0);\n\n // FIXME: scribe does not yet scrub out else statements\n return;\n\n#else \n vec3 coord = normalize(_normal);\n vec3 color = skybox.color.rgb;\n\n // blend is only set if there is a cubemap\n if (skybox.color.a > 0.0) {\n color = texture(cubeMap, coord).rgb;\n if (skybox.color.a < 1.0) {\n color *= skybox.color.rgb;\n }\n }\n _fragColor = vec4(color, 0.0);\n\n#endif\n\n}\n\n\n"
+ },
+ "CMomjaLoGqMsZMDrr//xag==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// model_normal_map.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/14/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec2 _texCoord0;\nlayout(location = 2) out vec2 _texCoord1;\nlayout(location = 3) out vec3 _normalWS;\nlayout(location = 4) out vec3 _tangentWS;\nlayout(location = 5) out vec3 _color;\nlayout(location = 6) out float _alpha;\n\nvoid main(void) {\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = 
cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _tangentWS = vec3(dot(mr0, inTangent.xyz), dot(mr1, inTangent.xyz), dot(mr2, inTangent.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_normal_map.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 5/6/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int 
mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return 
lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * 
angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#define scatteringMap 6\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(tableTex(matTex, scatteringMap), uv).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return tableTexValue(matTex, scatteringMap, uv).r; // boolean scattering for now\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\nuniform sampler2D scatteringMap;\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(scatteringMap, uv, TAA_TEXTURE_LOD_BIAS).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return texture(scatteringMap, uv).r; // boolean scattering for now\n}\n#endif\n\n\n\nlayout(location = 0) in vec4 _positionES;\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 2) in vec2 _texCoord1;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _tangentWS;\nlayout(location = 5) in vec3 _color;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTex = (((matKey & NORMAL_MAP_BIT) != 0) ? 
fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\nfloat scatteringTex = (((matKey & SCATTERING_MAP_BIT) != 0) ? fetchScatteringMap(_texCoord0) : 0.0);\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragNormalWS;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTex, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormalWS = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n {\n scattering = (((matKey & SCATTERING_MAP_BIT) != 0) ? scatteringTex : scattering);\n}\n;\n\n packDeferredFragment(\n normalize(fragNormalWS.xyz),\n opacity,\n albedo,\n roughness,\n metallic,\n emissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "DbXVJSaflA+BRMZFQs25WA==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawViewportQuatTransformTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] filling in \n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = 
texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, 0.0, 1.0),\n vec4(1.0, -1.0, 0.0, 1.0),\n vec4(-1.0, 1.0, 0.0, 1.0),\n vec4(1.0, 1.0, 0.0, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n // standard transform but applied to the Texcoord\n vec4 tc = vec4((pos.xy + 1.0) * 0.5, pos.zw);\n\n TransformObject obj = getTransformObject();\n { // transformModelToWorldPos\n tc = (obj._model * tc);\n }\n\n\n gl_Position = pos;\n varTexCoord0 = tc.xy;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// velocityBuffer_cameraMotion.frag\n//\n// Created by Sam Gateau on 6/3/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 _prevView;\n mat4 _prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform 
getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\nin vec2 varTexCoord0;\nout vec4 outFragColor;\n\nuniform sampler2D depthMap;\n\n\nvoid main(void) {\n // Pixel being shaded\n ivec2 pixelPos;\n vec2 texcoordPos;\n ivec4 stereoSide;\n ivec2 framePixelPos = getPixelPosTexcoordPosAndSide(gl_FragCoord.xy, pixelPos, texcoordPos, stereoSide);\n \n\tfloat Zdb = texelFetch(depthMap, ivec2(gl_FragCoord.xy), 0).x;\n\n\t// The position of the pixel fragment in Eye space then in world space\n vec3 eyePos = evalUnjitteredEyePositionFromZdb(stereoSide.x, Zdb, texcoordPos);\n\tvec3 worldPos = (getViewInverse() * vec4(eyePos, 1.0)).xyz;\n \n vec3 prevEyePos = (getPreviousView() * vec4(worldPos, 1.0)).xyz;\n vec4 prevClipPos = (getUnjitteredProjection(stereoSide.x) * vec4(prevEyePos, 1.0));\n vec2 prevUV = 0.5 * (prevClipPos.xy / prevClipPos.w) + vec2(0.5);\n\n //vec2 imageSize = getWidthHeight(0);\n vec2 imageSize = vec2(1.0, 1.0);\n outFragColor = vec4( ((texcoordPos - prevUV) * imageSize), 0.0, 0.0);\n}\n\n\n"
+ },
+ "EG2IuGsgmBcJjNk/YAaKqw==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model_translucent.vert\n// vertex shader\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout float _alpha;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec4 _positionES;\nout vec4 _positionWS;\nout vec3 _normalWS;\nout vec3 _color;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= 
vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_translucent.frag\n// fragment shader\n//\n// Created by Sam Gateau on 2/15/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return 
bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower 
exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n\n\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat 
isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n 
denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return 
specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in 
world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\nuniform lightBuffer {\n Light lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = 
vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return 
frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? 
-eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? 
(element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n\n\n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float 
radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 
_textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec4 _positionWS;\nin vec3 _normalWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n\n\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? 
emissiveTex : emissive);\n}\n;\n\n vec3 fragPositionES = _positionES.xyz;\n vec3 fragPositionWS = _positionWS.xyz;\n // Lighting is done in world space\n vec3 fragNormalWS = normalize(_normalWS);\n\n TransformCamera cam = getTransformCamera();\n vec3 fragToEyeWS = cam._viewInverse[3].xyz - fragPositionWS;\n vec3 fragToEyeDirWS = normalize(fragToEyeWS);\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragToEyeDirWS);\n\n vec4 localLighting = vec4(0.0);\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(_positionWS);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (hasLocalLights(numLights, clusterPos, dims)) {\n localLighting = evalLocalLighting(cluster, numLights, fragPositionWS, surfaceWS,\n metallic, fresnel, albedo, 0.0,\n vec4(0), vec4(0), opacity);\n }\n\n _fragColor = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPositionES,\n\t\tfragPositionWS,\n albedo,\n fresnel,\n metallic,\n emissive,\n surfaceWS, opacity, localLighting.rgb),\n opacity);\n}\n\n\n"
+ },
+ "FMMwR7cPDX2l4zmI4MipUA==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// standardTransformPNTC.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/10/2015.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nout vec3 varPosition;\nout vec3 varNormal;\nout vec2 varTexCoord0;\nout vec4 varColor;\n\nvoid main(void) {\n varTexCoord0 = inTexCoord0.st;\n varColor = color_sRGBAToLinear(inColor);\n \n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n varNormal = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n 
varPosition = inPosition.xyz;\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawTextureOpaque.frag\n//\n// Draw texture 0 fetched at texcoord.xy\n// Alpha is 1\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\nuniform sampler2D colorMap;\n\nlayout(location = 0) in vec2 varTexCoord0;\nlayout(location = 0) out vec4 outFragColor;\n\nvoid main(void) {\n outFragColor = vec4(texture(colorMap, varTexCoord0).xyz, 1.0);\n}\n\n\n"
+ },
+ "FWQxyxWPPa+OEK1hceNbvA==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid 
skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec3 interpolatedNormal = vec3(0.0, 0.0, 0.0);\n\n skinPositionNormal(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, position, interpolatedNormal);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n\n\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_translucent.frag\n// fragment shader\n//\n// Created by Sam Gateau on 2/15/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or 
http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too 
far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef 
SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n\n\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n 
float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return 
specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in 
world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\nuniform lightBuffer {\n Light lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = 
vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return 
frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? 
-eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? 
(element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n\n\n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float 
radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 
_textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec4 _positionWS;\nin vec3 _normalWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n\n\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? 
emissiveTex : emissive);\n}\n;\n\n vec3 fragPositionES = _positionES.xyz;\n vec3 fragPositionWS = _positionWS.xyz;\n // Lighting is done in world space\n vec3 fragNormalWS = normalize(_normalWS);\n\n TransformCamera cam = getTransformCamera();\n vec3 fragToEyeWS = cam._viewInverse[3].xyz - fragPositionWS;\n vec3 fragToEyeDirWS = normalize(fragToEyeWS);\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragToEyeDirWS);\n\n vec4 localLighting = vec4(0.0);\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(_positionWS);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (hasLocalLights(numLights, clusterPos, dims)) {\n localLighting = evalLocalLighting(cluster, numLights, fragPositionWS, surfaceWS,\n metallic, fresnel, albedo, 0.0,\n vec4(0), vec4(0), opacity);\n }\n\n _fragColor = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPositionES,\n\t\tfragPositionWS,\n albedo,\n fresnel,\n metallic,\n emissive,\n surfaceWS, opacity, localLighting.rgb),\n opacity);\n}\n\n\n"
+ },
+ "FnU12MybTZGltNrZuBl8aA==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawViewportQuatTransformTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] filling in \n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = 
texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, 0.0, 1.0),\n vec4(1.0, -1.0, 0.0, 1.0),\n vec4(-1.0, 1.0, 0.0, 1.0),\n vec4(1.0, 1.0, 0.0, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n // standard transform but applied to the Texcoord\n vec4 tc = vec4((pos.xy + 1.0) * 0.5, pos.zw);\n\n TransformObject obj = getTransformObject();\n { // transformModelToWorldPos\n tc = (obj._model * tc);\n }\n\n\n gl_Position = pos;\n varTexCoord0 = tc.xy;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// surfaceGeometry_makeLinearDepth.frag\n//\n// Created by Sam Gateau on 6/3/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 _prevView;\n mat4 _prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return 
cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\n\nuniform sampler2D depthMap;\n\nout vec4 outFragColor;\n\nvoid main(void) {\n float Zdb = texelFetch(depthMap, ivec2(gl_FragCoord.xy), 0).x;\n float Zeye = -evalZeyeFromZdb(Zdb);\n outFragColor = vec4(Zeye, 0.0, 0.0, 1.0);\n}\n\n\n\n"
+ },
+ "FvDaaBFKlFq2M97g5NVmCA==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// model_lightmap.vert\n// vertex shader\n//\n// Created by Sam Gateau on 11/21/14.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec2 _texCoord0;\nlayout(location = 2) out vec2 _texCoord1;\nlayout(location = 3) out vec3 _normalWS;\nlayout(location = 4) out vec3 _color;\n\nvoid main(void) {\n // pass along the color in linear space\n _color = color_sRGBToLinear(inColor.xyz);\n\n // and the texture coordinates\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord1.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 
0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_lightmap.frag\n// fragment shader\n//\n// Created by Samuel Gateau on 11/19/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - 
FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float 
smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\n#endif\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nuniform sampler2D emissiveMap;\nvec3 fetchLightmapMap(vec2 uv) {\n vec2 emissiveParams = getTexMapArray()._lightmapParams.xy;\n return (vec3(emissiveParams.x) + emissiveParams.y * texture(emissiveMap, uv).rgb);\n}\n\n\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 2) in vec2 _texCoord1;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _color;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedo = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\n\n vec3 lightmapVal = fetchLightmapMap(_texCoord1);\n\n\n packDeferredFragmentLightmap(\n normalize(_normalWS), \n evalOpaqueFinalAlpha(getMaterialOpacity(mat), albedo.a),\n getMaterialAlbedo(mat) * albedo.rgb * _color,\n getMaterialRoughness(mat) * roughness,\n getMaterialMetallic(mat) * metallicTex,\n /*metallicTex, // no use of */getMaterialFresnel(mat),\n lightmapVal);\n}\n\n\n"
+ },
+ "G2a+Y4aUEqhZn7hakpwZFQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// standardTransformPNTC.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/10/2015.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nout vec3 varPosition;\nout vec3 varNormal;\nout vec2 varTexCoord0;\nout vec4 varColor;\n\nvoid main(void) {\n varTexCoord0 = inTexCoord0.st;\n varColor = color_sRGBAToLinear(inColor);\n \n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n varNormal = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n 
varPosition = inPosition.xyz;\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n// standardDrawTexture.frag\n// fragment shader\n//\n// Created by Sam Gateau on 6/10/15.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// the texture\nuniform sampler2D colorMap;\n\nin vec3 varPosition;\nin vec3 varNormal;\nin vec2 varTexCoord0;\nin vec4 varColor;\n\nout vec4 outFragColor;\n\nvoid main(void) {\n vec4 color = texture(colorMap, varTexCoord0);\n outFragColor = color * varColor;\n}\n\n\n"
+ },
+ "G46XKIM0jJ6vgDbngE52hQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 04/24/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec4 _positionWS;\nlayout(location = 2) out vec2 _texCoord0;\nlayout(location = 3) out vec2 _texCoord1;\nlayout(location = 4) out vec3 _normalWS;\nlayout(location = 5) out vec3 _color;\nlayout(location = 6) out float _alpha;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = 
vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= 
FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n 
float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly 
lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 1) in vec4 _positionWS;\nlayout(location = 2) in vec2 _texCoord0;\nlayout(location = 3) in vec2 _texCoord1;\nlayout(location = 4) in vec3 _normalWS;\nlayout(location = 5) in vec3 _color;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = 
fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n {\n if (opacity < 1.0) {\n discard;\n }\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n\n\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n\n packDeferredFragment(\n normalize(_normalWS), \n opacity,\n albedo,\n roughness,\n metallic,\n emissive+fadeEmissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "GOAzIGB6zqkuFtzp9rtLOg==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawViewportQuatTransformTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] filling in \n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = 
texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, 0.0, 1.0),\n vec4(1.0, -1.0, 0.0, 1.0),\n vec4(-1.0, 1.0, 0.0, 1.0),\n vec4(1.0, 1.0, 0.0, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n // standard transform but applied to the Texcoord\n vec4 tc = vec4((pos.xy + 1.0) * 0.5, pos.zw);\n\n TransformObject obj = getTransformObject();\n { // transformModelToWorldPos\n tc = (obj._model * tc);\n }\n\n\n gl_Position = pos;\n varTexCoord0 = tc.xy;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// surfaceGeometry_makeLinearDepth.frag\n//\n// Created by Sam Gateau on 6/3/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 _prevView;\n mat4 _prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return 
cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\n\nuniform sampler2D depthMap;\n\nout vec4 outFragColor;\n\nvoid main(void) {\n float Zdb = texelFetch(depthMap, ivec2(gl_FragCoord.xy), 0).x;\n float Zeye = -evalZeyeFromZdb(Zdb);\n outFragColor = vec4(Zeye, 0.0, 0.0, 1.0);\n}\n\n\n\n"
+ },
+ "HFU9mHLJX5P5/IfniUnl8A==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// model_normal_map.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/14/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec2 _texCoord0;\nlayout(location = 2) out vec2 _texCoord1;\nlayout(location = 3) out vec3 _normalWS;\nlayout(location = 4) out vec3 _tangentWS;\nlayout(location = 5) out vec3 _color;\nlayout(location = 6) out float _alpha;\n\nvoid main(void) {\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = 
cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _tangentWS = vec3(dot(mr0, inTangent.xyz), dot(mr1, inTangent.xyz), dot(mr2, inTangent.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_normal_map.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 5/6/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int 
mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return 
lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * 
angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#define scatteringMap 6\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(tableTex(matTex, scatteringMap), uv).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return tableTexValue(matTex, scatteringMap, uv).r; // boolean scattering for now\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\nuniform sampler2D scatteringMap;\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(scatteringMap, uv, TAA_TEXTURE_LOD_BIAS).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return texture(scatteringMap, uv).r; // boolean scattering for now\n}\n#endif\n\n\n\nlayout(location = 0) in vec4 _positionES;\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 2) in vec2 _texCoord1;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _tangentWS;\nlayout(location = 5) in vec3 _color;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTex = (((matKey & NORMAL_MAP_BIT) != 0) ? 
fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\nfloat scatteringTex = (((matKey & SCATTERING_MAP_BIT) != 0) ? fetchScatteringMap(_texCoord0) : 0.0);\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragNormalWS;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTex, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormalWS = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n {\n scattering = (((matKey & SCATTERING_MAP_BIT) != 0) ? scatteringTex : scattering);\n}\n;\n\n packDeferredFragment(\n normalize(fragNormalWS.xyz),\n opacity,\n albedo,\n roughness,\n metallic,\n emissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "HRnAky0KbclLNFv8f2spcA==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// model_shadow.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n 
TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nvoid main(void) {\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_shadow.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
+ "Hg1/zhYJuA3PGnI8pJnDNA==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid 
skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec3 interpolatedNormal = vec3(0.0, 0.0, 0.0);\n\n skinPositionNormal(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, position, interpolatedNormal);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n\n\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or 
http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / 
occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n 
surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 1) in vec4 _positionWS;\nlayout(location = 2) in vec2 _texCoord0;\nlayout(location = 3) in vec2 _texCoord1;\nlayout(location = 4) in vec3 _normalWS;\nlayout(location = 5) in vec3 _color;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = 
fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n {\n if (opacity < 1.0) {\n discard;\n }\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n\n\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n\n packDeferredFragment(\n normalize(_normalWS), \n opacity,\n albedo,\n roughness,\n metallic,\n emissive+fadeEmissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "I1NME6mcvVUMbl/SdAU7Og==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// subsurfaceScattering_makeLUT.frag\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nfloat gaussian(float v, float r) {\n const float _PI = 3.14159265358979523846;\n return (1.0 / sqrt(2.0 * _PI * v)) * exp(-(r*r) / (2.0 * v));\n}\n\nvec3 scatter(float r) {\n // r is the distance expressed in millimeter\n // returns the scatter reflectance\n // Values from GPU Gems 3 \"Advanced Skin Rendering\".\n // Originally taken from real life samples.\n const vec4 profile[6] = vec4[6](\n vec4(0.0064, 0.233, 0.455, 0.649),\n vec4(0.0484, 0.100, 0.336, 0.344),\n vec4(0.1870, 0.118, 0.198, 0.000),\n vec4(0.5670, 0.113, 0.007, 0.007),\n vec4(1.9900, 0.358, 0.004, 0.000),\n vec4(7.4100, 0.078, 0.000, 0.000)\n );\n const int profileNum = 6;\n\n vec3 ret = vec3(0.0);\n for (int i = 0; i < profileNum; i++) {\n float v = profile[i].x * 1.414;\n float g = gaussian(v, r);\n ret += g * profile[i].yzw;\n }\n\n return ret;\n}\n\n\nvec3 integrate(float cosTheta, float skinRadius) {\n // Angle from lighting direction.\n float theta = acos(cosTheta);\n vec3 totalWeights = vec3(0.0);\n vec3 totalLight = vec3(0.0);\n\n const float _PI = 3.14159265358979523846;\n const float step = 2.0 * _PI / float(2000);\n float a = -(_PI);\n\n\n while (a <= (_PI)) {\n float sampleAngle = theta + a;\n float diffuse = clamp(cos(sampleAngle), 0.0, 1.0);\n //if (diffuse < 0.0) diffuse = 0.0;\n //if (diffuse > 1.0) diffuse = 1.0;\n\n // Distance.\n float sampleDist = abs(2.0 * skinRadius * sin(a * 0.5));\n\n // Profile Weight.\n vec3 weights = scatter(sampleDist);\n\n totalWeights += weights;\n totalLight += diffuse * weights;\n a += step;\n }\n\n vec3 result = (totalLight / totalWeights);\n return clamp(result, vec3(0.0), vec3(1.0));\n\n}\n\n\nin vec2 varTexCoord0;\nout vec4 outFragColor;\n\nvoid main(void) {\n\n // Lookup by: x: NDotL y: 1 / r\n //float y = 2.0 * 1.0 / ((j + 1.0) / (double)height);\n //float x = ((i / (double)width) * 2.0) - 1.0;\n\n outFragColor = vec4(integrate(varTexCoord0.x * 2.0 - 1.0, 2.0 / varTexCoord0.y), 1.0);\n}\n\n\n\n"
+ },
+ "IJ62ZMCaEEKluYEcsDAtrQ==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// taa.frag\n// fragment shader\n//\n// Created by Sam Gateau on 8/14/2017\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// TAA.slh\n// Common component needed by TemporalAntialiasing fragment shader\n//\n// Created by Sam Gateau on 8/17/2017\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 _prevView;\n mat4 _prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * 
frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? 
value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\nuniform sampler2D depthMap;\nuniform sampler2D sourceMap;\nuniform sampler2D historyMap;\nuniform sampler2D velocityMap;\nuniform sampler2D nextMap;\n\nstruct TAAParams\n{\n\tfloat none;\n\tfloat blend;\n\tfloat covarianceGamma;\n\tfloat debugShowVelocityThreshold;\n ivec4 flags;\n vec4 pixelInfo_orbZoom;\n vec4 regionInfo;\n};\n\nlayout(std140) uniform taaParamsBuffer {\n TAAParams params;\n};\n\n#define GET_BIT(bitfield, bitIndex) bool((bitfield) & (1 << (bitIndex)))\n\nbool taa_isDebugEnabled() {\n return GET_BIT(params.flags.x, 0);\n}\n\nbool taa_showDebugCursor() {\n return GET_BIT(params.flags.x, 1);\n}\n\nbool taa_showClosestFragment() {\n return GET_BIT(params.flags.x, 3);\n}\n\nbool taa_constrainColor() {\n return GET_BIT(params.flags.y, 1);\n}\n\nbool taa_feedbackColor() {\n return GET_BIT(params.flags.y, 4);\n}\n\nvec2 taa_getDebugCursorTexcoord() {\n return params.pixelInfo_orbZoom.xy;\n}\n\nfloat taa_getDebugOrbZoom() {\n return params.pixelInfo_orbZoom.z;\n}\n\nvec2 taa_getRegionDebug() {\n return params.regionInfo.xy;\n}\n\nvec2 taa_getRegionFXAA() {\n return params.regionInfo.zw;\n}\n#define USE_YCOCG 1\n\nvec4 taa_fetchColor(sampler2D map, vec2 uv) {\n\tvec4 c = texture(map, uv);\n\t// Apply rapid pseudo tonemapping as TAA is applied to a tonemapped image, using luminance as weight, as proposed in\n\t// https://de45xmedrsdbp.cloudfront.net/Resources/files/TemporalAA_small-59732822.pdf\n\tfloat lum = dot(vec3(0.3,0.5,0.2),c.rgb);\n\tc.rgb = c.rgb / (1.0+lum);\n#if USE_YCOCG\n\treturn vec4(color_LinearToYCoCg(c.rgb), c.a);\n#else\n\treturn c;\n#endif\n}\n\nvec3 taa_resolveColor(vec3 color) {\n#if USE_YCOCG\n\tcolor = max(vec3(0), color_YCoCgToUnclampedLinear(color));\n#endif\n\t// Apply rapid inverse tonemapping, using luminance as weight, as proposed in\n\t// https://de45xmedrsdbp.cloudfront.net/Resources/files/TemporalAA_small-59732822.pdf\n\tfloat lum = dot(vec3(0.3,0.5,0.2),color.rgb);\n\tcolor = color / (1.0-lum);\n\treturn color;\n}\n\nvec4 taa_fetchSourceMap(vec2 uv) {\n\treturn taa_fetchColor(sourceMap, uv);\n}\n\nvec4 taa_fetchHistoryMap(vec2 uv) {\n\treturn taa_fetchColor(historyMap, uv);\n}\n\nvec4 taa_fetchNextMap(vec2 uv) {\n\treturn taa_fetchColor(nextMap, uv);\n}\n\nvec2 taa_fetchVelocityMap(vec2 uv) {\n\treturn texture(velocityMap, uv).xy;\n}\n\nfloat taa_fetchDepth(vec2 uv) {\n\treturn -texture(depthMap, vec2(uv), 0).x;\n}\n\n\n#define ZCMP_GT(a, b) (a > b)\n\nvec2 taa_getImageSize() {\n vec2 imageSize = getWidthHeight(0);\n if (isStereo()) {\n imageSize.x *= 2.0;\n }\n return imageSize;\n}\n\nvec2 taa_getTexelSize() {\n vec2 texelSize = 
getInvWidthHeight();\n if (isStereo()) {\n texelSize.x *= 0.5;\n }\n return texelSize;\n}\n\nvec3 taa_findClosestFragment3x3(vec2 uv)\n{\n\tvec2 dd = abs(taa_getTexelSize());\n\tvec2 du = vec2(dd.x, 0.0);\n\tvec2 dv = vec2(0.0, dd.y);\n\n\tvec3 dtl = vec3(-1, -1, taa_fetchDepth(uv - dv - du));\n\tvec3 dtc = vec3( 0, -1, taa_fetchDepth(uv - dv));\n\tvec3 dtr = vec3( 1, -1, taa_fetchDepth(uv - dv + du));\n\n\tvec3 dml = vec3(-1, 0, taa_fetchDepth(uv - du));\n\tvec3 dmc = vec3( 0, 0, taa_fetchDepth(uv));\n\tvec3 dmr = vec3( 1, 0, taa_fetchDepth(uv + du));\n\n\tvec3 dbl = vec3(-1, 1, taa_fetchDepth(uv + dv - du));\n\tvec3 dbc = vec3( 0, 1, taa_fetchDepth(uv + dv));\n\tvec3 dbr = vec3( 1, 1, taa_fetchDepth(uv + dv + du));\n\n\tvec3 dmin = dtl;\n\tif (ZCMP_GT(dmin.z, dtc.z)) dmin = dtc;\n\tif (ZCMP_GT(dmin.z, dtr.z)) dmin = dtr;\n\n\tif (ZCMP_GT(dmin.z, dml.z)) dmin = dml;\n\tif (ZCMP_GT(dmin.z, dmc.z)) dmin = dmc;\n\tif (ZCMP_GT(dmin.z, dmr.z)) dmin = dmr;\n\n\tif (ZCMP_GT(dmin.z, dbl.z)) dmin = dbl;\n\tif (ZCMP_GT(dmin.z, dbc.z)) dmin = dbc;\n\n\n\tif (ZCMP_GT(dmin.z, dbr.z)) dmin = dbr;\n\n\treturn vec3(uv + dd.xy * dmin.xy, dmin.z);\n}\n\nvec2 taa_fetchVelocityMapBest(vec2 uv) {\n vec2 dd = abs(taa_getTexelSize());\n vec2 du = vec2(dd.x, 0.0);\n vec2 dv = vec2(0.0, dd.y);\n\n vec2 dtl = taa_fetchVelocityMap(uv - dv - du);\n vec2 dtc = taa_fetchVelocityMap(uv - dv);\n vec2 dtr = taa_fetchVelocityMap(uv - dv + du);\n\n vec2 dml = taa_fetchVelocityMap(uv - du);\n vec2 dmc = taa_fetchVelocityMap(uv);\n vec2 dmr = taa_fetchVelocityMap(uv + du);\n\n vec2 dbl = taa_fetchVelocityMap(uv + dv - du);\n vec2 dbc = taa_fetchVelocityMap(uv + dv);\n vec2 dbr = taa_fetchVelocityMap(uv + dv + du);\n\n vec3 best = vec3(dtl, dot(dtl,dtl));\n\n float testSpeed = dot(dtc,dtc);\n if (testSpeed > best.z) { best = vec3(dtc, testSpeed); }\n testSpeed = dot(dtr,dtr);\n if (testSpeed > best.z) { best = vec3(dtr, testSpeed); }\n\n testSpeed = dot(dml,dml);\n if (testSpeed > best.z) { best = vec3(dml, testSpeed); }\n testSpeed = dot(dmc,dmc);\n if (testSpeed > best.z) { best = vec3(dmc, testSpeed); }\n testSpeed = dot(dmr,dmr);\n if (testSpeed > best.z) { best = vec3(dmr, testSpeed); }\n\n testSpeed = dot(dbl,dbl);\n if (testSpeed > best.z) { best = vec3(dbl, testSpeed); }\n testSpeed = dot(dbc,dbc);\n if (testSpeed > best.z) { best = vec3(dbc, testSpeed); }\n testSpeed = dot(dbr,dbr);\n if (testSpeed > best.z) { best = vec3(dbr, testSpeed); }\n\n return best.xy;\n}\n\nvec2 taa_fromFragUVToEyeUVAndSide(vec2 fragUV, out int stereoSide) {\n vec2 eyeUV = fragUV;\n stereoSide = 0;\n if (isStereo()) {\n if (eyeUV.x > 0.5) {\n eyeUV.x -= 0.5;\n stereoSide = 1;\n }\n eyeUV.x *= 2.0;\n }\n return eyeUV;\n}\n\nvec2 taa_fromEyeUVToFragUV(vec2 eyeUV, int stereoSide) {\n vec2 fragUV = eyeUV;\n if (isStereo()) {\n fragUV.x *= 0.5;\n fragUV.x += stereoSide*0.5;\n }\n return fragUV;\n}\n\nvec2 taa_computePrevFragAndEyeUV(vec2 fragUV, vec2 fragVelocity, out vec2 prevEyeUV) {\n int stereoSide = 0;\n vec2 eyeUV = taa_fromFragUVToEyeUVAndSide(fragUV, stereoSide);\n prevEyeUV = eyeUV - fragVelocity;\n return taa_fromEyeUVToFragUV(prevEyeUV, stereoSide);\n}\n\nvec2 taa_fetchSourceAndHistory(vec2 fragUV, vec2 fragVelocity, out vec3 sourceColor, out vec3 historyColor) {\n vec2 prevEyeUV;\n vec2 prevFragUV = taa_computePrevFragAndEyeUV(fragUV, fragVelocity, prevEyeUV);\n sourceColor = taa_fetchSourceMap(fragUV).xyz;\n\n historyColor = sourceColor;\n if (!(any(lessThan(prevEyeUV, vec2(0.0))) || any(greaterThan(prevEyeUV, vec2(1.0))))) {\n 
historyColor = taa_fetchHistoryMap(prevFragUV).xyz;\n }\n return prevFragUV;\n}\n\nfloat Luminance(vec3 rgb) {\n return rgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0;\n}\n\n#define MINMAX_3X3_ROUNDED 1\n\nmat3 taa_evalNeighbourColorVariance(vec3 sourceColor, vec2 fragUV, vec2 fragVelocity) {\n vec2 texelSize = taa_getTexelSize();\n \n\n\tvec2 du = vec2(texelSize.x, 0.0);\n\tvec2 dv = vec2(0.0, texelSize.y);\n\n vec3 sampleColor = taa_fetchSourceMap(fragUV - dv - du).rgb;\n vec3 sumSamples = sampleColor;\n vec3 sumSamples2 = sampleColor * sampleColor;\n\n sampleColor = taa_fetchSourceMap(fragUV - dv).rgb;\n sumSamples += sampleColor;\n sumSamples2 += sampleColor * sampleColor;\n\n sampleColor = taa_fetchSourceMap(fragUV - dv + du).rgb;\n sumSamples += sampleColor;\n sumSamples2 += sampleColor * sampleColor;\n\n sampleColor = taa_fetchSourceMap(fragUV - du).rgb;\n sumSamples += sampleColor;\n sumSamples2 += sampleColor * sampleColor;\n\n sampleColor = sourceColor; //taa_fetchSourceMap(fragUV).rgb; // could resuse the same osurce sampleColor isn't it ?\n sumSamples += sampleColor;\n sumSamples2 += sampleColor * sampleColor;\n\n sampleColor = taa_fetchSourceMap(fragUV + du).rgb;\n sumSamples += sampleColor;\n sumSamples2 += sampleColor * sampleColor;\n\n sampleColor = taa_fetchSourceMap(fragUV + dv - du).rgb;\n sumSamples += sampleColor;\n sumSamples2 += sampleColor * sampleColor;\n\n sampleColor = taa_fetchSourceMap(fragUV + dv).rgb;\n sumSamples += sampleColor;\n sumSamples2 += sampleColor * sampleColor;\n \n sampleColor = taa_fetchSourceMap(fragUV + dv + du).rgb;\n sumSamples += sampleColor;\n sumSamples2 += sampleColor * sampleColor;\n\n \n vec3 mu = sumSamples / vec3(9.0);\n vec3 sigma = sqrt(max(sumSamples2 / vec3(9.0) - mu * mu, vec3(0.0)));\n \n float gamma = params.covarianceGamma;\n vec3 cmin = mu - gamma * sigma;\n vec3 cmax = mu + gamma * sigma;\n\n return mat3(cmin, cmax, mu);\n}\n\nmat3 taa_evalNeighbourColorRegion(vec3 sourceColor, vec2 fragUV, vec2 fragVelocity, float fragZe) {\n vec2 imageSize = taa_getImageSize();\n vec2 texelSize = taa_getTexelSize();\n vec3 cmin, cmax, cavg;\n\n #if MINMAX_3X3_ROUNDED\n\t\tvec2 du = vec2(texelSize.x, 0.0);\n\t\tvec2 dv = vec2(0.0, texelSize.y);\n\n\t\tvec3 ctl = taa_fetchSourceMap(fragUV - dv - du).rgb;\n\t\tvec3 ctc = taa_fetchSourceMap(fragUV - dv).rgb;\n\t\tvec3 ctr = taa_fetchSourceMap(fragUV - dv + du).rgb;\n\t\tvec3 cml = taa_fetchSourceMap(fragUV - du).rgb;\n\t\tvec3 cmc = sourceColor; //taa_fetchSourceMap(fragUV).rgb; // could resuse the same osurce sample isn't it ?\n\t\tvec3 cmr = taa_fetchSourceMap(fragUV + du).rgb;\n\t\tvec3 cbl = taa_fetchSourceMap(fragUV + dv - du).rgb;\n\t\tvec3 cbc = taa_fetchSourceMap(fragUV + dv).rgb;\n\t\tvec3 cbr = taa_fetchSourceMap(fragUV + dv + du).rgb;\n\n\t\tcmin = min(ctl, min(ctc, min(ctr, min(cml, min(cmc, min(cmr, min(cbl, min(cbc, cbr))))))));\n\t\tcmax = max(ctl, max(ctc, max(ctr, max(cml, max(cmc, max(cmr, max(cbl, max(cbc, cbr))))))));\n\n\t\t#if MINMAX_3X3_ROUNDED || USE_YCOCG || USE_CLIPPING\n\t\t\tcavg = (ctl + ctc + ctr + cml + cmc + cmr + cbl + cbc + cbr) / 9.0;\n #elif\n cavg = (cmin + cmax ) * 0.5;\n\t\t#endif\n\n\t\t#if MINMAX_3X3_ROUNDED\n\t\t\tvec3 cmin5 = min(ctc, min(cml, min(cmc, min(cmr, cbc))));\n\t\t\tvec3 cmax5 = max(ctc, max(cml, max(cmc, max(cmr, cbc))));\n\t\t\tvec3 cavg5 = (ctc + cml + cmc + cmr + cbc) / 5.0;\n\t\t\tcmin = 0.5 * (cmin + cmin5);\n\t\t\tcmax = 0.5 * (cmax + cmax5);\n\t\t\tcavg = 0.5 * (cavg + cavg5);\n\t\t#endif\n #else\n\t\tconst float _SubpixelThreshold = 
0.5;\n\t\tconst float _GatherBase = 0.5;\n\t\tconst float _GatherSubpixelMotion = 0.1666;\n\n\t\tvec2 texel_vel = fragVelocity * imageSize;\n\t\tfloat texel_vel_mag = length(texel_vel) * -fragZe;\n\t\tfloat k_subpixel_motion = clamp(_SubpixelThreshold / (0.0001 + texel_vel_mag), 0.0, 1.0);\n\t\tfloat k_min_max_support = _GatherBase + _GatherSubpixelMotion * k_subpixel_motion;\n\n\t\tvec2 ss_offset01 = k_min_max_support * vec2(-texelSize.x, texelSize.y);\n\t\tvec2 ss_offset11 = k_min_max_support * vec2(texelSize.x, texelSize.y);\n\t\tvec3 c00 = taa_fetchSourceMap(fragUV - ss_offset11).rgb;\n\t\tvec3 c10 = taa_fetchSourceMap(fragUV - ss_offset01).rgb;\n\t\tvec3 c01 = taa_fetchSourceMap(fragUV + ss_offset01).rgb;\n\t\tvec3 c11 = taa_fetchSourceMap(fragUV + ss_offset11).rgb;\n\n\t\tcmin = min(c00, min(c10, min(c01, c11)));\n\t\tcmax = max(c00, max(c10, max(c01, c11)));\n cavg = (cmin + cmax ) * 0.5;\n\n\t\t#if USE_YCOCG || USE_CLIPPING\n\t\t\tcavg = (c00 + c10 + c01 + c11) / 4.0;\n #elif\n cavg = (cmin + cmax ) * 0.5;\n\t\t#endif\n #endif\n\n \t\t// shrink chroma min-max\n\t#if USE_YCOCG\n\t\tvec2 chroma_extent = vec2(0.25 * 0.5 * (cmax.r - cmin.r));\n\t\tvec2 chroma_center = sourceColor.gb;\n\t\tcmin.yz = chroma_center - chroma_extent;\n\t\tcmax.yz = chroma_center + chroma_extent;\n\t\tcavg.yz = chroma_center;\n\t#endif\n\n return mat3(cmin, cmax, cavg);\n}\n\n//#define USE_OPTIMIZATIONS 0\n\nvec3 taa_clampColor(vec3 colorMin, vec3 colorMax, vec3 colorSource, vec3 color) {\n\tconst float eps = 0.00001;\n vec3 p = colorSource;\n vec3 q = color;\n\t// note: only clips towards aabb center (but fast!)\n\tvec3 p_clip = 0.5 * (colorMax + colorMin);\n\tvec3 e_clip = 0.5 * (colorMax - colorMin) + vec3(eps);\n\n\tvec3 v_clip = q - p_clip;\n\tvec3 v_unit = v_clip.xyz / e_clip;\n\tvec3 a_unit = abs(v_unit);\n\tfloat ma_unit = max(a_unit.x, max(a_unit.y, a_unit.z));\n\n\tif (ma_unit > 1.0)\n\t\treturn p_clip + v_clip / ma_unit;\n\telse\n\t\treturn q;// point inside aabb\t\t\n}\n\nvec3 taa_evalConstrainColor(vec3 sourceColor, vec2 sourceUV, vec2 sourceVel, vec3 candidateColor) {\n mat3 colorMinMaxAvg;\n\n colorMinMaxAvg = taa_evalNeighbourColorVariance(sourceColor, sourceUV, sourceVel);\n \n\t// clamp history to neighbourhood of current sample\n return taa_clampColor(colorMinMaxAvg[0], colorMinMaxAvg[1], sourceColor, candidateColor);\n}\n\nvec3 taa_evalFeedbackColor(vec3 sourceColor, vec3 historyColor, float blendFactor) {\n const float _FeedbackMin = 0.1;\n const float _FeedbackMax = 0.9;\n\t// feedback weight from unbiased luminance diff (t.lottes)\n\t#if USE_YCOCG\n\t\tfloat lum0 = sourceColor.r;\n\t\tfloat lum1 = historyColor.r;\n\t#else\n\t\tfloat lum0 = Luminance(sourceColor.rgb);\n\t\tfloat lum1 = Luminance(historyColor.rgb);\n\t#endif\n\tfloat unbiased_diff = abs(lum0 - lum1) / max(lum0, max(lum1, 0.2));\n\tfloat unbiased_weight = 1.0 - unbiased_diff;\n\tfloat unbiased_weight_sqr = unbiased_weight * unbiased_weight;\n\tfloat k_feedback = mix(_FeedbackMin, _FeedbackMax, unbiased_weight_sqr);\n\n \n vec3 nextColor = mix(historyColor, sourceColor, k_feedback * blendFactor).xyz;\n return nextColor;\n}\n\n\nvec3 colorWheel(float normalizedHue) {\n float v = normalizedHue * 6.f;\n if (v < 0.f) {\n return vec3(1.f, 0.f, 0.f);\n } else if (v < 1.f) {\n return vec3(1.f, v, 0.f);\n } else if (v < 2.f) {\n return vec3(1.f - (v-1.f), 1.f, 0.f);\n } else if (v < 3.f) {\n return vec3(0.f, 1.f, (v-2.f));\n } else if (v < 4.f) {\n return vec3(0.f, 1.f - (v-3.f), 1.f );\n } else if (v < 5.f) {\n return 
vec3((v-4.f), 0.f, 1.f );\n } else if (v < 6.f) {\n return vec3(1.f, 0.f, 1.f - (v-5.f));\n } else {\n return vec3(1.f, 0.f, 0.f);\n }\n}\n\nvec3 colorRamp(float normalizedHue) {\n float v = normalizedHue * 5.f;\n if (v < 0.f) {\n return vec3(1.f, 0.f, 0.f);\n } else if (v < 1.f) {\n return vec3(1.f, v, 0.f);\n\n\n } else if (v < 2.f) {\n return vec3(1.f - (v - 1.f), 1.f, 0.f);\n } else if (v < 3.f) {\n return vec3(0.f, 1.f, (v - 2.f));\n } else if (v < 4.f) {\n return vec3(0.f, 1.f - (v - 3.f), 1.f);\n } else if (v < 5.f) {\n return vec3((v - 4.f), 0.f, 1.f);\n } else {\n return vec3(1.f, 0.f, 1.f);\n }\n}\n\n\nvec3 taa_getVelocityColorRelative(float velocityPixLength) {\n return colorRamp(velocityPixLength/params.debugShowVelocityThreshold);\n}\n\nvec3 taa_getVelocityColorAboveThreshold(float velocityPixLength) {\n return colorRamp((velocityPixLength - params.debugShowVelocityThreshold)/params.debugShowVelocityThreshold);\n}\n\n\nvec3 taa_evalFXAA(vec2 fragUV) {\n\n // vec2 texelSize = getInvWidthHeight();\n vec2 texelSize = taa_getTexelSize();\n\n // filter width limit for dependent \"two-tap\" texture samples\n float FXAA_SPAN_MAX = 8.0;\n\n // local contrast multiplier for performing AA\n // higher = sharper, but setting this value too high will cause near-vertical and near-horizontal edges to fail\n // see \"fxaaQualityEdgeThreshold\"\n float FXAA_REDUCE_MUL = 1.0 / 8.0;\n\n // luminance threshold for processing dark colors\n // see \"fxaaQualityEdgeThresholdMin\"\n float FXAA_REDUCE_MIN = 1.0 / 128.0;\n\n // fetch raw RGB values for nearby locations\n // sampling pattern is \"five on a die\" (each diagonal direction and the center)\n // computing the coordinates for these texture reads could be moved to the vertex shader for speed if needed\n vec3 rgbNW = texture(sourceMap, fragUV + (vec2(-1.0, -1.0) * texelSize)).xyz;\n vec3 rgbNE = texture(sourceMap, fragUV + (vec2(+1.0, -1.0) * texelSize)).xyz;\n vec3 rgbSW = texture(sourceMap, fragUV + (vec2(-1.0, +1.0) * texelSize)).xyz;\n vec3 rgbSE = texture(sourceMap, fragUV + (vec2(+1.0, +1.0) * texelSize)).xyz;\n vec3 rgbM = texture(sourceMap, fragUV).xyz;\n\t\n // convert RGB values to luminance\n vec3 luma = vec3(0.299, 0.587, 0.114);\n float lumaNW = dot(rgbNW, luma);\n float lumaNE = dot(rgbNE, luma);\n float lumaSW = dot(rgbSW, luma);\n float lumaSE = dot(rgbSE, luma);\n float lumaM = dot( rgbM, luma);\n\t\n // luma range of local neighborhood\n float lumaMin = min(lumaM, min(min(lumaNW, lumaNE), min(lumaSW, lumaSE)));\n float lumaMax = max(lumaM, max(max(lumaNW, lumaNE), max(lumaSW, lumaSE)));\n\t\n // direction perpendicular to local luma gradient\n vec2 dir;\n dir.x = -((lumaNW + lumaNE) - (lumaSW + lumaSE));\n dir.y = ((lumaNW + lumaSW) - (lumaNE + lumaSE));\n\n // compute clamped direction offset for additional \"two-tap\" samples\n // longer vector = blurry, shorter vector = sharp\n float dirReduce = max((lumaNW + lumaNE + lumaSW + lumaSE) * (0.25 * FXAA_REDUCE_MUL), FXAA_REDUCE_MIN);\n float rcpDirMin = 1.0 / (min(abs(dir.x), abs(dir.y)) + dirReduce);\n dir = min(vec2(FXAA_SPAN_MAX, FXAA_SPAN_MAX), \n max(vec2(-FXAA_SPAN_MAX, -FXAA_SPAN_MAX), dir * rcpDirMin)) * texelSize;\n\t\t\n // perform additional texture sampling perpendicular to gradient\n vec3 rgbA = (1.0 / 2.0) * (\n texture(sourceMap, fragUV + dir * (1.0 / 3.0 - 0.5)).xyz +\n texture(sourceMap, fragUV + dir * (2.0 / 3.0 - 0.5)).xyz);\n vec3 rgbB = rgbA * (1.0 / 2.0) + (1.0 / 4.0) * (\n texture(sourceMap, fragUV + dir * (0.0 / 3.0 - 0.5)).xyz +\n texture(sourceMap, 
fragUV + dir * (3.0 / 3.0 - 0.5)).xyz);\n float lumaB = dot(rgbB, luma);\n\n // compare luma of new samples to the luma range of the original neighborhood\n // if the new samples exceed this range, just use the first two samples instead of all four\n if (lumaB < lumaMin || lumaB > lumaMax) {\n return rgbA;\n } else {\n return rgbB;\n }\n}in vec2 varTexCoord0;\nlayout(location = 0) out vec4 outFragColor;\n\nvoid main() {\n vec2 fragUV = varTexCoord0;\n\n // Debug region before debug or fxaa region X\n float distToRegionFXAA = fragUV.x - taa_getRegionFXAA().x; \n if (distToRegionFXAA > 0.0) {\n outFragColor = vec4(taa_evalFXAA(fragUV), 1.0);\n return;\n }\n\n vec2 fragVel = taa_fetchVelocityMapBest(fragUV).xy;\n\n vec3 sourceColor;\n vec3 historyColor;\n vec2 prevFragUV = taa_fetchSourceAndHistory(fragUV, fragVel, sourceColor, historyColor);\n\n vec3 nextColor = sourceColor;\n \n if (taa_constrainColor()) {\n // clamp history to neighbourhood of current sample\n historyColor = taa_evalConstrainColor(sourceColor, fragUV, fragVel, historyColor);\n }\n \n if (taa_feedbackColor()) {\n nextColor = taa_evalFeedbackColor(sourceColor, historyColor, params.blend);\n } else {\n nextColor = mix(historyColor, sourceColor, params.blend);\n }\n\n outFragColor = vec4(taa_resolveColor(nextColor), 1.0);\n}\n\n\n"
+ },
+ "IYnWu6i5FAKpknqGF8kWag==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawViewportQuatTransformTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] filling in \n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = 
texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, 0.0, 1.0),\n vec4(1.0, -1.0, 0.0, 1.0),\n vec4(-1.0, 1.0, 0.0, 1.0),\n vec4(1.0, 1.0, 0.0, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n // standard transform but applied to the Texcoord\n vec4 tc = vec4((pos.xy + 1.0) * 0.5, pos.zw);\n\n TransformObject obj = getTransformObject();\n { // transformModelToWorldPos\n tc = (obj._model * tc);\n }\n\n\n gl_Position = pos;\n varTexCoord0 = tc.xy;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// Haze.frag\n//\n// Created by Nissim Hadar on 9/5/2107.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 _prevView;\n mat4 _prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform 
getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat 
lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 
getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n\n\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return 
lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = 
fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n\n\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams 
hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n\n\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == 
HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nuniform sampler2D colorMap;\nuniform sampler2D linearDepthMap;\n\nvec4 unpackPositionFromZeye(vec2 texcoord) {\n float Zeye = -texture(linearDepthMap, texcoord).x;\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0);\n}\n\nin vec2 varTexCoord0;\nout vec4 outFragColor;\n\nvoid main(void) {\n if ((isHazeEnabled() == 0.0) || (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) != HAZE_MODE_IS_ACTIVE) {\n discard;\n }\n\n vec4 fragColor = texture(colorMap, varTexCoord0);\n vec4 fragPositionES = unpackPositionFromZeye(varTexCoord0);\n\n mat4 viewInverse = getViewInverse();\n vec4 fragPositionWS = viewInverse * fragPositionES;\n vec4 eyePositionWS = viewInverse[3];\n\n Light light = getKeyLight();\n vec3 lightDirectionWS = getLightDirection(light);\n\n outFragColor = computeHazeColor(fragColor, fragPositionES.xyz, fragPositionWS.xyz, eyePositionWS.xyz, lightDirectionWS);\n}\n\n\n"
+ },
+ "J0EApsSNXA43OTVFxWYe2w==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_shadow_dq.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer 
{\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = 
dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n}\n\n\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum 
/= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n skinPosition(inSkinClusterIndex, inSkinClusterWeight, inPosition, position);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_shadow.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
+ "JefB90FYCNd4kFnBpAxuYw==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_shadow_dq.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer 
{\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = 
dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n}\n\n\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum 
/= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n skinPosition(inSkinClusterIndex, inSkinClusterWeight, inPosition, position);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// skin_model_shadow.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
+ "JrU2NNcLi2CpWUGd110RBQ==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 04/24/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec4 _positionWS;\nlayout(location = 2) out vec2 _texCoord0;\nlayout(location = 3) out vec2 _texCoord1;\nlayout(location = 4) out vec3 _normalWS;\nlayout(location = 5) out vec3 _color;\nlayout(location = 6) out float _alpha;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = 
vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= 
FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n 
float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly 
lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 1) in vec4 _positionWS;\nlayout(location = 2) in vec2 _texCoord0;\nlayout(location = 3) in vec2 _texCoord1;\nlayout(location = 4) in vec3 _normalWS;\nlayout(location = 5) in vec3 _color;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = 
fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n {\n if (opacity < 1.0) {\n discard;\n }\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n\n\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n\n packDeferredFragment(\n normalize(_normalWS), \n opacity,\n albedo,\n roughness,\n metallic,\n emissive+fadeEmissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
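The entry closed above ends with the model_fade fragment stage, which writes surface normals into the RGB8 normal target through packNormal/unpackNormal, an octahedral encode followed by a snorm12x2-to-unorm8x3 repack. As an orientation aid only, and assuming the helper functions defined in that cached source are in scope, the round trip reduces to the following sketch (not part of the generated cache):

// Illustrative sketch, not generated output.
vec3 storeAndReloadNormal(vec3 n) {
    // encode: unit vector -> octahedral square [-1,1]^2 -> three unorm8 channels
    vec3 rgb8 = snorm12x2_to_unorm8x3(float32x3_to_oct(normalize(n)));
    // decode: three unorm8 channels -> octahedral square -> renormalized vector
    return oct_to_float32x3(unorm8x3_to_snorm12x2(rgb8));
}

The decoded vector matches the input up to the 12-bit quantisation applied by the packing.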
+ "JztsCp1r0zhCcplOJSKEIg==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:23:43 2018\n//\n// blurGaussianDepthAwareH.frag\n//\n// Created by Sam Gateau on 6/7/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:23:43 2018\n//\n// Created by Sam Gateau on 6/7/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:23:43 2018\n//\n// Created by Olivier Prat on 09/25/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n#define BLUR_MAX_NUM_TAPS 33\nstruct BlurParameters {\n vec4 resolutionInfo;\n vec4 texcoordTransform;\n vec4 filterInfo;\n vec4 depthInfo;\n vec4 stereoInfo;\n vec4 linearDepthInfo;\n vec2 taps[BLUR_MAX_NUM_TAPS];\n};\n\nuniform blurParamsBuffer {\n BlurParameters parameters;\n};\n\nvec2 getViewportInvWidthHeight() {\n return parameters.resolutionInfo.zw;\n}\n\nvec2 evalTexcoordTransformed(vec2 texcoord) {\n return (texcoord * parameters.texcoordTransform.zw + parameters.texcoordTransform.xy);\n}\n\nfloat getFilterScale() {\n return parameters.filterInfo.x;\n}\n\nint getFilterNumTaps() {\n return int(parameters.filterInfo.y);\n}\n\nfloat getOutputAlpha() {\n return parameters.filterInfo.z;\n}\n\nvec2 getFilterTap(int index) {\n return parameters.taps[index];\n}\n\nfloat getFilterTapOffset(vec2 tap) {\n return tap.x;\n}\n\nfloat getFilterTapWeight(vec2 tap) {\n return tap.y;\n}\n\nfloat getDepthThreshold() {\n return parameters.depthInfo.x;\n}\n\nfloat getDepthPerspective() {\n return parameters.depthInfo.w;\n}\n\nfloat getPosLinearDepthFar() {\n return parameters.linearDepthInfo.x;\n}\n\n\n\nuniform sampler2D sourceMap;\nuniform sampler2D depthMap;\n\nvec4 pixelShaderGaussianDepthAware(vec2 texcoord, vec2 direction, vec2 pixelStep) {\n texcoord = evalTexcoordTransformed(texcoord);\n float sampleDepth = texture(depthMap, texcoord).x;\n if (sampleDepth >= getPosLinearDepthFar()) {\n discard;\n }\n vec4 sampleCenter = texture(sourceMap, texcoord);\n\n // Calculate the width scale.\n float distanceToProjectionWindow = getDepthPerspective();\n\n float depthThreshold = getDepthThreshold();\n\n // Calculate the final step to fetch the surrounding pixels.\n float filterScale = 
getFilterScale();\n float scale = distanceToProjectionWindow / sampleDepth;\n\n vec2 finalStep = filterScale * scale * direction * pixelStep;\n int numTaps = getFilterNumTaps();\n\n // Accumulate the center sample\n vec2 tapInfo = getFilterTap(0);\n float totalWeight = getFilterTapWeight(tapInfo);\n vec4 srcBlurred = sampleCenter * totalWeight;\n\n for(int i = 1; i < numTaps; i++) {\n tapInfo = getFilterTap(i);\n\n // Fetch color and depth for current sample.\n vec2 sampleCoord = texcoord + (getFilterTapOffset(tapInfo) * finalStep);\n if (all(greaterThanEqual(sampleCoord, vec2(0,0))) && all(lessThanEqual(sampleCoord, vec2(1.0,1.0)))) {\n float srcDepth = texture(depthMap, sampleCoord).x;\n vec4 srcSample = texture(sourceMap, sampleCoord);\n float weight = getFilterTapWeight(tapInfo);\n \n // If the difference in depth is huge, we lerp color back.\n float s = clamp(depthThreshold * distanceToProjectionWindow * filterScale * abs(srcDepth - sampleDepth), 0.0, 1.0);\n srcSample = mix(srcSample, sampleCenter, s);\n\n // Accumulate.\n srcBlurred += srcSample * weight;\n totalWeight += weight;\n }\n } \n \n if (totalWeight>0.0) {\n srcBlurred /= totalWeight;\n }\n return srcBlurred;\n}\n\n\n\nlayout(location = 0) in vec2 varTexCoord0;\n\nlayout(location = 0) out vec4 outFragColor;\n\nvoid main(void) {\n outFragColor = pixelShaderGaussianDepthAware(varTexCoord0, vec2(1.0, 0.0), getViewportInvWidthHeight());\n}\n\n\n\n"
+ },
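The blurGaussianDepthAwareH entry above is the horizontal pass of a separable Gaussian blur whose one twist is depth awareness: each tap is pulled back toward the centre sample in proportion to its linear-depth difference, so the blur does not bleed across depth discontinuities. A reduced, hypothetical sketch of that per-tap rule (parameter names here are illustrative, not the cache's uniform names):

// Illustrative sketch of the per-tap blend used in pixelShaderGaussianDepthAware.
vec4 depthAwareTap(vec4 centerColor, float centerDepth,
                   vec4 tapColor, float tapDepth,
                   float tapWeight, float depthSensitivity) {
    // A large depth gap pushes s toward 1, collapsing the tap onto the centre colour.
    float s = clamp(depthSensitivity * abs(tapDepth - centerDepth), 0.0, 1.0);
    return mix(tapColor, centerColor, s) * tapWeight;
}

In the cached source, depthSensitivity corresponds to the product of the depth threshold, the projection-window distance, and the filter scale, and the weighted taps are summed and then renormalised by the accumulated weight.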
+ "KH6a9o7N1OosoXyKLBEv4w==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// subsurfaceScattering_makeSpecularBeckmann.frag\n//\n// Created by Sam Gateau on 6/30/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nin vec2 varTexCoord0;\nout vec4 outFragColor;\n\nfloat specularBeckmann(float ndoth, float roughness) {\n float alpha = acos(ndoth);\n float ta = tan(alpha);\n float val = 1.0 / (roughness * roughness * pow(ndoth, 4.0)) * exp(-(ta * ta) / (roughness * roughness));\n return val;\n}\n\nvoid main(void) {\n outFragColor = vec4(vec3(0.5 * pow( specularBeckmann(varTexCoord0.x, varTexCoord0.y), 0.1)), 1.0);\n}\n\n\n"
+ },
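The subsurfaceScattering_makeSpecularBeckmann entry above bakes the Beckmann specular term into a 2D lookup texture indexed by (N dot H, roughness), compressing its range with 0.5 * pow(x, 0.1) before storing. The skin shaders elsewhere in this cache read it back through fetchSpecularBeckmann, which applies pow(2.0 * value, 10.0), so the two stages cancel exactly (up to texture quantisation). A tiny sketch of that identity, assuming specularBeckmann from the cached source is in scope:

// Illustrative only: bake-time compression and fetch-time decompression cancel.
float roundTripBeckmann(float ndoth, float roughness) {
    float B = specularBeckmann(ndoth, roughness); // the raw Beckmann lobe
    float stored = 0.5 * pow(B, 0.1);             // what the bake pass writes to the LUT
    return pow(2.0 * stored, 10.0);               // what fetchSpecularBeckmann evaluates; equals B
}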
+ "KZyjNGIHl9JvqHP/edr1yg==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_fade_dq.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual 
quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * 
vec4(inNormal, 0));\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec3 interpolatedNormal = vec3(0.0, 0.0, 0.0);\n\n skinPositionNormal(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, position, interpolatedNormal);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef 
GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = 
FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float 
ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 
1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 1) in vec4 _positionWS;\nlayout(location = 2) in vec2 _texCoord0;\nlayout(location = 3) in vec2 _texCoord1;\nlayout(location = 4) in vec3 _normalWS;\nlayout(location = 5) in vec3 _color;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = 
fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n {\n if (opacity < 1.0) {\n discard;\n }\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n\n\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n\n packDeferredFragment(\n normalize(_normalWS), \n opacity,\n albedo,\n roughness,\n metallic,\n emissive+fadeEmissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "KwPjt0+3ZrXGpMVUhBkSTg==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:23:43 2018\n//\n// blurGaussianH.frag\n//\n// Created by Sam Gateau on 6/7/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:23:43 2018\n//\n// Created by Sam Gateau on 6/7/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:23:43 2018\n//\n// Created by Olivier Prat on 09/25/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n#define BLUR_MAX_NUM_TAPS 33\nstruct BlurParameters {\n vec4 resolutionInfo;\n vec4 texcoordTransform;\n vec4 filterInfo;\n vec4 depthInfo;\n vec4 stereoInfo;\n vec4 linearDepthInfo;\n vec2 taps[BLUR_MAX_NUM_TAPS];\n};\n\nuniform blurParamsBuffer {\n BlurParameters parameters;\n};\n\nvec2 getViewportInvWidthHeight() {\n return parameters.resolutionInfo.zw;\n}\n\nvec2 evalTexcoordTransformed(vec2 texcoord) {\n return (texcoord * parameters.texcoordTransform.zw + parameters.texcoordTransform.xy);\n}\n\nfloat getFilterScale() {\n return parameters.filterInfo.x;\n}\n\nint getFilterNumTaps() {\n return int(parameters.filterInfo.y);\n}\n\nfloat getOutputAlpha() {\n return parameters.filterInfo.z;\n}\n\nvec2 getFilterTap(int index) {\n return parameters.taps[index];\n}\n\nfloat getFilterTapOffset(vec2 tap) {\n return tap.x;\n}\n\nfloat getFilterTapWeight(vec2 tap) {\n return tap.y;\n}\n\nfloat getDepthThreshold() {\n return parameters.depthInfo.x;\n}\n\nfloat getDepthPerspective() {\n return parameters.depthInfo.w;\n}\n\nfloat getPosLinearDepthFar() {\n return parameters.linearDepthInfo.x;\n}\n\n\n\nuniform sampler2D sourceMap;\n\nvec4 pixelShaderGaussian(vec2 texcoord, vec2 direction, vec2 pixelStep) {\n texcoord = evalTexcoordTransformed(texcoord);\n\n vec2 finalStep = getFilterScale() * direction * pixelStep;\n vec4 srcBlurred = vec4(0.0);\n float totalWeight = 0.f;\n int numTaps = getFilterNumTaps();\n \n for(int i = 0; i < numTaps; i++) {\n vec2 tapInfo = getFilterTap(i);\n // Fetch color for current sample.\n vec2 sampleCoord = texcoord + (getFilterTapOffset(tapInfo) * finalStep);\n if (all(greaterThanEqual(sampleCoord, vec2(0,0))) && all(lessThanEqual(sampleCoord, vec2(1.0,1.0)))) {\n vec4 
srcSample = texture(sourceMap, sampleCoord);\n float weight = getFilterTapWeight(tapInfo);\n // Accumulate.\n srcBlurred += srcSample * weight;\n totalWeight += weight;\n }\n }\n \n if (totalWeight>0.0) {\n srcBlurred /= totalWeight;\n }\n srcBlurred.a = getOutputAlpha();\n return srcBlurred;\n}\n\n\n\n\nlayout(location = 0) in vec2 varTexCoord0;\n\nlayout(location = 0) out vec4 outFragColor;\n\nvoid main(void) {\n outFragColor = pixelShaderGaussian(varTexCoord0, vec2(1.0, 0.0), getViewportInvWidthHeight());\n}\n\n\n\n"
+ },
+ "LZ9IXPsvRoIc6I+O+aAfXg==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// model_shadow.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n 
TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nvoid main(void) {\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_shadow.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
+ "LckWrI5952bz7t4vrKO//w==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// model_lightmap_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec2 _texCoord0;\nlayout(location = 2) out vec2 _texCoord1;\nlayout(location = 3) out vec3 _normalWS;\nlayout(location = 4) out vec3 _color;\nlayout(location = 5) out vec4 _positionWS;\n\nvoid main(void) {\n // pass along the color in linear space\n _color = color_sRGBToLinear(inColor.xyz);\n\n // and the texture coordinates\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord1.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = 
cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_lightmap_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * 
FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 
lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar 
color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\n#endif\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nuniform sampler2D emissiveMap;\nvec3 fetchLightmapMap(vec2 uv) {\n vec2 emissiveParams = getTexMapArray()._lightmapParams.xy;\n return (vec3(emissiveParams.x) + emissiveParams.y * texture(emissiveMap, uv).rgb);\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 2) in vec2 _texCoord1;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _color;\nlayout(location = 5) in vec4 _positionWS;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = 
fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedo = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\n\n vec3 lightmapVal = fetchLightmapMap(_texCoord1);\n\n\n packDeferredFragmentLightmap(\n normalize(_normalWS), \n evalOpaqueFinalAlpha(getMaterialOpacity(mat), albedo.a),\n getMaterialAlbedo(mat) * albedo.rgb * _color,\n getMaterialRoughness(mat) * roughness,\n getMaterialMetallic(mat) * metallicTex,\n /*metallicTex, // no use of */getMaterialFresnel(mat),\n lightmapVal+fadeEmissive);\n}\n\n\n"
+ },
+ "LdUCA2vslLE/h50ip4PzRA==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// simple.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 9/15/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\n// the interpolated normal\nout vec3 _normalWS;\nout vec3 _normalMS;\nout vec4 _color;\nout vec2 _texCoord0;\nout vec4 _positionMS;\nout vec4 _positionES;\n\nvoid main(void) {\n _color = color_sRGBAToLinear(inColor);\n _texCoord0 = inTexCoord0.st;\n _positionMS = inPosition;\n _normalMS = inNormal.xyz;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = 
obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// forward_simple_textured_transparent.frag\n// fragment shader\n//\n// Created by Clement Brisset on 5/29/15.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High 
Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n 
return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n\n\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout 
SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n\n\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the 
accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is 
not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we 
require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n\n\n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlended(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal, vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) {\n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normal);\n vec3 fragPositionWS = vec3(invViewMat * vec4(position, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n 
evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normal);\n vec3 fragPositionWS = vec3(invViewMat * vec4(position, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n // FIXME - temporarily removed until we support it for forward...\n /* if ((hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }*/\n\n return color;\n}\n\n\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode 
Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n// the albedo texture\nuniform sampler2D originalTexture;\n\n// the interpolated normal\nlayout(location = 0) in vec3 _normalWS;\nlayout(location = 1) in vec4 _color;\nlayout(location = 2) in vec2 _texCoord0;\nlayout(location = 3) in vec4 _positionES;\n\nlayout(location = 0) out vec4 _fragColor0;\n\nvoid main(void) {\n vec4 texel = texture(originalTexture, _texCoord0);\n float colorAlpha = _color.a * texel.a;\n\n TransformCamera cam = getTransformCamera();\n vec3 fragPosition = _positionES.xyz;\n\n _fragColor0 = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n DEFAULT_OCCLUSION,\n fragPosition,\n normalize(_normalWS),\n _color.rgb * texel.rgb,\n DEFAULT_FRESNEL,\n DEFAULT_METALLIC,\n DEFAULT_EMISSIVE,\n DEFAULT_ROUGHNESS, colorAlpha),\n colorAlpha);\n}\n\n"
+ },
+ "M2eUZ1xdf1L900MdggmDRg==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// simple.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 9/15/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\n// the interpolated normal\nout vec3 _normalWS;\nout vec3 _normalMS;\nout vec4 _color;\nout vec2 _texCoord0;\nout vec4 _positionMS;\nout vec4 _positionES;\n\nvoid main(void) {\n _color = color_sRGBAToLinear(inColor);\n _texCoord0 = inTexCoord0.st;\n _positionMS = inPosition;\n _normalMS = inNormal.xyz;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = 
obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// simple_textured_unlit.frag\n// fragment shader\n//\n// Created by Clement Brisset on 5/29/15.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? 
((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 
0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = 
dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n\n\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n\n\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// the albedo texture\nuniform sampler2D originalTexture;\n\n// the interpolated normal\nin vec3 _normalWS;\nin vec4 _color;\nin vec2 _texCoord0;\n\nvoid main(void) {\n vec4 texel = texture(originalTexture, _texCoord0.st);\n float colorAlpha = _color.a;\n if (_color.a <= 0.0) {\n texel = color_sRGBAToLinear(texel);\n colorAlpha = -_color.a;\n }\n\n const float ALPHA_THRESHOLD = 0.999;\n if (colorAlpha * texel.a < ALPHA_THRESHOLD) {\n packDeferredFragmentTranslucent(\n normalize(_normalWS),\n colorAlpha * texel.a,\n _color.rgb * texel.rgb,\n DEFAULT_FRESNEL,\n DEFAULT_ROUGHNESS);\n } else {\n packDeferredFragmentUnlit(\n normalize(_normalWS),\n 1.0,\n _color.rgb * texel.rgb);\n }\n}\n\n"
+ },
+ "Mw8Z4HCjs1MrjPNC9w6kUQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawTexture.frag\n//\n// Draw texture 0 fetched at texcoord.xy\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\nuniform sampler2D colorMap;\n\nlayout(location = 0) in vec2 varTexCoord0;\nlayout(location = 0) out vec4 outFragColor;\n\nvoid main(void) {\n outFragColor = texture(colorMap, varTexCoord0);\n}\n\n\n"
+ },
+ "Ns2pr7qGfnAWa/Z2I70M1A==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// subsurfaceScattering_makeLUT.frag\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nfloat gaussian(float v, float r) {\n const float _PI = 3.14159265358979523846;\n return (1.0 / sqrt(2.0 * _PI * v)) * exp(-(r*r) / (2.0 * v));\n}\n\nvec3 scatter(float r) {\n // r is the distance expressed in millimeter\n // returns the scatter reflectance\n // Values from GPU Gems 3 \"Advanced Skin Rendering\".\n // Originally taken from real life samples.\n const vec4 profile[6] = vec4[6](\n vec4(0.0064, 0.233, 0.455, 0.649),\n vec4(0.0484, 0.100, 0.336, 0.344),\n vec4(0.1870, 0.118, 0.198, 0.000),\n vec4(0.5670, 0.113, 0.007, 0.007),\n vec4(1.9900, 0.358, 0.004, 0.000),\n vec4(7.4100, 0.078, 0.000, 0.000)\n );\n const int profileNum = 6;\n\n vec3 ret = vec3(0.0);\n for (int i = 0; i < profileNum; i++) {\n float v = profile[i].x * 1.414;\n float g = gaussian(v, r);\n ret += g * profile[i].yzw;\n }\n\n return ret;\n}\n\n\nvec3 integrate(float cosTheta, float skinRadius) {\n // Angle from lighting direction.\n float theta = acos(cosTheta);\n vec3 totalWeights = vec3(0.0);\n vec3 totalLight = vec3(0.0);\n\n const float _PI = 3.14159265358979523846;\n const float step = 2.0 * _PI / float(2000);\n float a = -(_PI);\n\n\n while (a <= (_PI)) {\n float sampleAngle = theta + a;\n float diffuse = clamp(cos(sampleAngle), 0.0, 1.0);\n //if (diffuse < 0.0) diffuse = 0.0;\n //if (diffuse > 1.0) diffuse = 1.0;\n\n // Distance.\n float sampleDist = abs(2.0 * skinRadius * sin(a * 0.5));\n\n // Profile Weight.\n vec3 weights = scatter(sampleDist);\n\n totalWeights += weights;\n totalLight += diffuse * weights;\n a += step;\n }\n\n vec3 result = (totalLight / totalWeights);\n return clamp(result, vec3(0.0), vec3(1.0));\n\n}\n\n\nin vec2 varTexCoord0;\nout vec4 outFragColor;\n\nvoid main(void) {\n\n // Lookup by: x: NDotL y: 1 / r\n //float y = 2.0 * 1.0 / ((j + 1.0) / (double)height);\n //float x = ((i / (double)width) * 2.0) - 1.0;\n\n outFragColor = vec4(integrate(varTexCoord0.x * 2.0 - 1.0, 2.0 / varTexCoord0.y), 1.0);\n}\n\n\n\n"
+ },
+ "NxqGRz3opuTH2bY3EmjfOA==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// overlay3D.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/16/15.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nout vec3 _color;\nout float _alpha;\nout vec2 _texCoord0;\nout vec4 _positionES;\nout vec3 _normalWS;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n _texCoord0 = inTexCoord0.st;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, 
inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// overlay3D_unlit.frag\n// fragment shader\n//\n// Created by Zach Pomerantz on 2/2/2016.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nuniform sampler2D originalTexture;\n\nin vec2 _texCoord0;\nin vec3 _color;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec4 albedo = texture(originalTexture, _texCoord0);\n\n if (albedo.a <= 0.1) {\n discard;\n }\n vec4 color = vec4(albedo.rgb * _color, albedo.a);\n\n // Apply standard tone mapping\n _fragColor = vec4(pow(color.xyz, vec3(1.0 / 2.2)), color.w);\n}\n\n\n"
+ },
+ "O0nFCglOBDxnQ6h8RZJ0iw==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// overlay3D.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/16/15.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nout vec3 _color;\nout float _alpha;\nout vec2 _texCoord0;\nout vec4 _positionES;\nout vec3 _normalWS;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n _texCoord0 = inTexCoord0.st;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, 
inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n// overlay3D.frag\n// fragment shader\n//\n// Created by Sam Gateau on 6/16/15.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat 
lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat 
getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n\n\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = 
dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n\n\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= 
isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\nvec4 evalGlobalColor(float shadowAttenuation, vec3 position, vec3 normal, vec3 albedo, float metallic, vec3 fresnel, float roughness, float opacity) {\n\n // Need the light now\n Light light = getKeyLight();\n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n LightAmbient ambient = getLightAmbient();\n\n TransformCamera cam = getTransformCamera();\n vec3 fragEyeVectorView = normalize(-position);\n vec3 fragEyeDir;\n { // transformEyeToWorldDir\n fragEyeDir = vec3(cam._viewInverse * vec4(fragEyeVectorView.xyz, 0.0));\n }\n\n\n SurfaceData surface = initSurfaceData(roughness, normal, fragEyeDir);\n\n vec3 color = opacity * albedo * getLightColor(light) * getLightAmbientIntensity(ambient);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, 
fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse * isDiffuseEnabled() * isDirectionalEnabled();\n color += directionalSpecular * isSpecularEnabled() * isDirectionalEnabled();\n\n return vec4(color, opacity);\n}\n\nuniform sampler2D originalTexture;\n\nin vec2 _texCoord0;\nin vec4 _positionES;\nin vec3 _normalWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec4 albedo = texture(originalTexture, _texCoord0);\n\n vec3 fragPosition = _positionES.xyz;\n vec3 fragNormal = normalize(_normalWS);\n vec3 fragAlbedo = albedo.rgb * _color;\n float fragMetallic = 0.0;\n vec3 fragSpecular = vec3(0.1);\n float fragRoughness = 0.9;\n float fragOpacity = albedo.a;\n\n if (fragOpacity <= 0.1) {\n discard;\n }\n\n vec4 color = evalGlobalColor(1.0,\n fragPosition,\n fragNormal,\n fragAlbedo,\n fragMetallic,\n fragSpecular,\n fragRoughness,\n fragOpacity);\n\n\n // Apply standard tone mapping\n _fragColor = vec4(pow(color.xyz, vec3(1.0 / 2.2)), color.w);\n}\n\n\n"
+ },
+ "OBUMls6ehY6dnh+ktMF4Rw==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:23:43 2018\n//\n// blurGaussianDepthAwareV.frag\n//\n// Created by Sam Gateau on 6/7/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:23:43 2018\n//\n// Created by Sam Gateau on 6/7/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:23:43 2018\n//\n// Created by Olivier Prat on 09/25/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n#define BLUR_MAX_NUM_TAPS 33\nstruct BlurParameters {\n vec4 resolutionInfo;\n vec4 texcoordTransform;\n vec4 filterInfo;\n vec4 depthInfo;\n vec4 stereoInfo;\n vec4 linearDepthInfo;\n vec2 taps[BLUR_MAX_NUM_TAPS];\n};\n\nuniform blurParamsBuffer {\n BlurParameters parameters;\n};\n\nvec2 getViewportInvWidthHeight() {\n return parameters.resolutionInfo.zw;\n}\n\nvec2 evalTexcoordTransformed(vec2 texcoord) {\n return (texcoord * parameters.texcoordTransform.zw + parameters.texcoordTransform.xy);\n}\n\nfloat getFilterScale() {\n return parameters.filterInfo.x;\n}\n\nint getFilterNumTaps() {\n return int(parameters.filterInfo.y);\n}\n\nfloat getOutputAlpha() {\n return parameters.filterInfo.z;\n}\n\nvec2 getFilterTap(int index) {\n return parameters.taps[index];\n}\n\nfloat getFilterTapOffset(vec2 tap) {\n return tap.x;\n}\n\nfloat getFilterTapWeight(vec2 tap) {\n return tap.y;\n}\n\nfloat getDepthThreshold() {\n return parameters.depthInfo.x;\n}\n\nfloat getDepthPerspective() {\n return parameters.depthInfo.w;\n}\n\nfloat getPosLinearDepthFar() {\n return parameters.linearDepthInfo.x;\n}\n\n\n\nuniform sampler2D sourceMap;\nuniform sampler2D depthMap;\n\nvec4 pixelShaderGaussianDepthAware(vec2 texcoord, vec2 direction, vec2 pixelStep) {\n texcoord = evalTexcoordTransformed(texcoord);\n float sampleDepth = texture(depthMap, texcoord).x;\n if (sampleDepth >= getPosLinearDepthFar()) {\n discard;\n }\n vec4 sampleCenter = texture(sourceMap, texcoord);\n\n // Calculate the width scale.\n float distanceToProjectionWindow = getDepthPerspective();\n\n float depthThreshold = getDepthThreshold();\n\n // Calculate the final step to fetch the surrounding pixels.\n float filterScale = 
getFilterScale();\n float scale = distanceToProjectionWindow / sampleDepth;\n\n vec2 finalStep = filterScale * scale * direction * pixelStep;\n int numTaps = getFilterNumTaps();\n\n // Accumulate the center sample\n vec2 tapInfo = getFilterTap(0);\n float totalWeight = getFilterTapWeight(tapInfo);\n vec4 srcBlurred = sampleCenter * totalWeight;\n\n for(int i = 1; i < numTaps; i++) {\n tapInfo = getFilterTap(i);\n\n // Fetch color and depth for current sample.\n vec2 sampleCoord = texcoord + (getFilterTapOffset(tapInfo) * finalStep);\n if (all(greaterThanEqual(sampleCoord, vec2(0,0))) && all(lessThanEqual(sampleCoord, vec2(1.0,1.0)))) {\n float srcDepth = texture(depthMap, sampleCoord).x;\n vec4 srcSample = texture(sourceMap, sampleCoord);\n float weight = getFilterTapWeight(tapInfo);\n \n // If the difference in depth is huge, we lerp color back.\n float s = clamp(depthThreshold * distanceToProjectionWindow * filterScale * abs(srcDepth - sampleDepth), 0.0, 1.0);\n srcSample = mix(srcSample, sampleCenter, s);\n\n // Accumulate.\n srcBlurred += srcSample * weight;\n totalWeight += weight;\n }\n } \n \n if (totalWeight>0.0) {\n srcBlurred /= totalWeight;\n }\n return srcBlurred;\n}\n\n\n\nlayout(location = 0) in vec2 varTexCoord0;\n\nlayout(location = 0) out vec4 outFragColor;\n\nvoid main(void) {\n outFragColor = pixelShaderGaussianDepthAware(varTexCoord0, vec2(0.0, 1.0), getViewportInvWidthHeight());\n}\n\n\n\n"
+ },
+ "OXuYqz7HXHKCvj2UC5LgYg==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// simple_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/04/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\n// Generated on Wed May 23 14:24:07 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nout vec4 _fadeData1;\nout vec4 _fadeData2;\nout vec4 _fadeData3;\n\n\n// the interpolated normal\nout vec3 _normalWS;\nout vec3 _normalMS;\nout vec4 _color;\nout vec2 _texCoord0;\nout vec4 _positionMS;\nout vec4 _positionES;\nout vec4 _positionWS;\n\nvoid main(void) {\n _color = color_sRGBAToLinear(inColor);\n _texCoord0 = inTexCoord0.st;\n _positionMS = inPosition;\n _normalMS = inNormal.xyz;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = 
uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n _fadeData1 = inTexCoord2;\n _fadeData2 = inTexCoord3;\n _fadeData3 = inTexCoord4; \n\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// simple_transparent_textured_unlit_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n// the albedo texture\nuniform sampler2D originalTexture;\n\nin vec4 _color;\nin vec2 _texCoord0;\nin vec4 _positionWS;\n\nlayout(location = 0) out vec4 _fragColor0;\n\n// Declare after all samplers to prevent sampler location mix up with originalTexture\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n 
vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nin vec4 _fadeData1;\nin vec4 _fadeData2;\nin vec4 _fadeData3;\n\n\n\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = int(_fadeData1.w);\n fadeParams.threshold = 
_fadeData2.w;\n fadeParams.noiseOffset = _fadeData1.xyz;\n fadeParams.baseOffset = _fadeData2.xyz;\n fadeParams.baseInvSize = _fadeData3.xyz;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n vec4 texel = texture(originalTexture, _texCoord0.st);\n float colorAlpha = _color.a;\n if (_color.a <= 0.0) {\n texel = color_sRGBAToLinear(texel);\n colorAlpha = -_color.a;\n }\n _fragColor0 = vec4(_color.rgb * texel.rgb + fadeEmissive, colorAlpha * texel.a);\n}\n\n"
+ },
+ "Ok9iZWcsGL4q0bLcSA15DQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_shadow_dq.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer 
{\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = 
dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n}\n\n\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum 
/= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n skinPosition(inSkinClusterIndex, inSkinClusterWeight, inPosition, position);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_shadow.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
+ "OnbojG95NhmpyDQOx0iTzQ==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_fade_dq.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual 
quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * 
vec4(inNormal, 0));\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec3 interpolatedNormal = vec3(0.0, 0.0, 0.0);\n\n skinPositionNormal(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, position, interpolatedNormal);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef 
GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n// model_translucent_fade.frag\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool 
lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // 
Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n\n\n return 
lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float 
smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return 
specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in 
world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\nuniform lightBuffer {\n Light lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = 
vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return 
frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? 
-eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? 
(element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n\n\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float 
radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 
_textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\n\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec4 _positionWS;\nin vec3 _normalWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = 
fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragPositionES = _positionES.xyz;\n vec3 fragPositionWS = _positionWS.xyz;\n // Lighting is done in world space\n vec3 fragNormalWS = normalize(_normalWS);\n\n TransformCamera cam = getTransformCamera();\n vec3 fragToEyeWS = cam._viewInverse[3].xyz - fragPositionWS;\n vec3 fragToEyeDirWS = normalize(fragToEyeWS);\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragToEyeDirWS);\n\n vec4 localLighting = vec4(0.0);\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(_positionWS);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (hasLocalLights(numLights, clusterPos, dims)) {\n localLighting = evalLocalLighting(cluster, numLights, fragPositionWS, surfaceWS,\n metallic, fresnel, albedo, 0.0,\n vec4(0), vec4(0), opacity);\n }\n\n _fragColor = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPositionES,\n fragPositionWS,\n albedo,\n fresnel,\n metallic,\n emissive + fadeEmissive,\n surfaceWS, opacity, localLighting.rgb),\n opacity);\n}\n\n\n"
+ },
+ "OrApvS3xLxHRsw+fYw1FGg==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// simple.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 9/15/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\n// the interpolated normal\nout vec3 _normalWS;\nout vec3 _normalMS;\nout vec4 _color;\nout vec2 _texCoord0;\nout vec4 _positionMS;\nout vec4 _positionES;\n\nvoid main(void) {\n _color = color_sRGBAToLinear(inColor);\n _texCoord0 = inTexCoord0.st;\n _positionMS = inPosition;\n _normalMS = inNormal.xyz;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = 
obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// simple_textured_unlit.frag\n// fragment shader\n//\n// Created by Clement Brisset on 5/29/15.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? 
((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 
0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = 
dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n\n\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n\n\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// the albedo texture\nuniform sampler2D originalTexture;\n\n// the interpolated normal\nin vec3 _normalWS;\nin vec4 _color;\nin vec2 _texCoord0;\n\nvoid main(void) {\n vec4 texel = texture(originalTexture, _texCoord0.st);\n float colorAlpha = _color.a;\n if (_color.a <= 0.0) {\n texel = color_sRGBAToLinear(texel);\n colorAlpha = -_color.a;\n }\n\n const float ALPHA_THRESHOLD = 0.999;\n if (colorAlpha * texel.a < ALPHA_THRESHOLD) {\n packDeferredFragmentTranslucent(\n normalize(_normalWS),\n colorAlpha * texel.a,\n _color.rgb * texel.rgb,\n DEFAULT_FRESNEL,\n DEFAULT_ROUGHNESS);\n } else {\n packDeferredFragmentUnlit(\n normalize(_normalWS),\n 1.0,\n _color.rgb * texel.rgb);\n }\n}\n\n"
+ },
+ "P6OwkvLRGTZTEznF6VYYzw==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// model_shadow_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n 
TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nlayout(location = 0) out vec4 _positionWS;\n\nvoid main(void) {\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_shadow_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = 
textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 0) in vec4 _positionWS;\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = 
fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFadeClip(fadeParams, _positionWS.xyz);\n\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
+ "PenRxjxfyu6BZ1zAlxzVsg==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_shadow_dq.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer 
{\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = 
dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n}\n\n\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum 
/= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n skinPosition(inSkinClusterIndex, inSkinClusterWeight, inPosition, position);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// skin_model_shadow.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
+ "PpGze5SXPBQ8cU4OIPl3iQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_normal_map_fade_dq.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/29/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual 
quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * 
vec4(inNormal, 0));\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _tangentWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n skinPositionNormalTangent(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, inTangent.xyz, position, interpolatedNormal.xyz, interpolatedTangent.xyz);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n interpolatedNormal = vec4(normalize(interpolatedNormal.xyz), 0.0);\n interpolatedTangent = vec4(normalize(interpolatedTangent.xyz), 0.0);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= 
cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedNormal.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedTangent.xyz = vec3(dot(mr0, interpolatedTangent.xyz), dot(mr1, interpolatedTangent.xyz), dot(mr2, interpolatedTangent.xyz));\n }\n\n\n _normalWS = interpolatedNormal.xyz;\n _tangentWS = interpolatedTangent.xyz;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// model_translucent_normal_map_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 23/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT 
= 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat 
lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 
_ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n\n\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return 
vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the 
accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is 
not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we 
require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 
ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\nuniform lightBuffer {\n Light lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float 
fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / 
planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return 
frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? -eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? 
frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? (element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n\n\n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = 
lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef 
GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // 
unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n\n\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec3 _normalWS;\nin vec3 _tangentWS;\nin vec3 _color;\nin float _alpha;\nin vec4 _positionWS;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n 
fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n int matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTex = (((matKey & NORMAL_MAP_BIT) != 0) ? fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragPositionES = _positionES.xyz;\n vec3 fragPositionWS = _positionWS.xyz;\n // Lighting is done in world space\n vec3 fragNormalWS;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTex, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormalWS = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n TransformCamera cam = getTransformCamera();\n vec3 fragToEyeWS = cam._viewInverse[3].xyz - fragPositionWS;\n vec3 fragToEyeDirWS = normalize(fragToEyeWS);\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragToEyeDirWS);\n\n vec4 localLighting = vec4(0.0);\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(_positionWS);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (hasLocalLights(numLights, clusterPos, dims)) {\n localLighting = evalLocalLighting(cluster, numLights, fragPositionWS, surfaceWS,\n metallic, fresnel, albedo, 0.0,\n vec4(0), vec4(0), opacity);\n }\n\n _fragColor = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPositionES,\n\t\tfragPositionWS,\n albedo,\n fresnel,\n metallic,\n emissive + fadeEmissive,\n surfaceWS, opacity, localLighting.rgb),\n opacity);\n}\n\n\n"
+ },
+ "PvKsMxZ7XmmybJMzNJjbQg==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_fade_dq.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual 
quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * 
vec4(inNormal, 0));\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec3 interpolatedNormal = vec3(0.0, 0.0, 0.0);\n\n skinPositionNormal(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, position, interpolatedNormal);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef 
GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n// model_translucent_fade.frag\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool 
lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // 
Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n\n\n return 
lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float 
smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return 
specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in 
world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\nuniform lightBuffer {\n Light lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = 
vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return 
frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? 
-eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? 
(element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n\n\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float 
radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 
_textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\n\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec4 _positionWS;\nin vec3 _normalWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = 
fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragPositionES = _positionES.xyz;\n vec3 fragPositionWS = _positionWS.xyz;\n // Lighting is done in world space\n vec3 fragNormalWS = normalize(_normalWS);\n\n TransformCamera cam = getTransformCamera();\n vec3 fragToEyeWS = cam._viewInverse[3].xyz - fragPositionWS;\n vec3 fragToEyeDirWS = normalize(fragToEyeWS);\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragToEyeDirWS);\n\n vec4 localLighting = vec4(0.0);\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(_positionWS);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (hasLocalLights(numLights, clusterPos, dims)) {\n localLighting = evalLocalLighting(cluster, numLights, fragPositionWS, surfaceWS,\n metallic, fresnel, albedo, 0.0,\n vec4(0), vec4(0), opacity);\n }\n\n _fragColor = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPositionES,\n fragPositionWS,\n albedo,\n fresnel,\n metallic,\n emissive + fadeEmissive,\n surfaceWS, opacity, localLighting.rgb),\n opacity);\n}\n\n\n"
+ },
+ "QFC2mrxkRobv+zJiIHMbUA==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// model_shadow_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n 
TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nlayout(location = 0) out vec4 _positionWS;\n\nvoid main(void) {\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_shadow_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = 
textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 0) in vec4 _positionWS;\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = 
fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFadeClip(fadeParams, _positionWS.xyz);\n\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
+ "QK0w4Yh9tGTneCM+4XmuWQ==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_dq.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/14/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual 
quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * 
vec4(inNormal, 0));\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _color;\nout float _alpha;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec3 interpolatedNormal = vec3(0.0, 0.0, 0.0);\n\n skinPositionNormal(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, position, interpolatedNormal);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef 
GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_specular_map.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 5/6/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * 
FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float 
metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#define scatteringMap 6\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(tableTex(matTex, scatteringMap), uv).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return tableTexValue(matTex, scatteringMap, uv).r; // boolean scattering for now\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\nuniform sampler2D scatteringMap;\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(scatteringMap, uv, TAA_TEXTURE_LOD_BIAS).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return texture(scatteringMap, uv).r; // boolean scattering for now\n}\n#endif\n\n\n\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 2) in vec2 _texCoord1;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _color;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\nfloat scatteringTex = (((matKey & SCATTERING_MAP_BIT) != 0) ? fetchScatteringMap(_texCoord0) : 0.0);\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? 
step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n {\n if (opacity < 1.0) {\n discard;\n }\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n {\n scattering = (((matKey & SCATTERING_MAP_BIT) != 0) ? scatteringTex : scattering);\n}\n;\n\n packDeferredFragment(\n normalize(_normalWS), \n opacity,\n albedo,\n roughness,\n metallic,\n emissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "Qjow7Vm0FAyIwkizx143yA==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// deferred_light.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/16/16.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec2 _texCoord0;\n\nuniform vec4 texcoordFrameTransform;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n _texCoord0 = (pos.xy + 1.0) * 0.5;\n\n _texCoord0 *= texcoordFrameTransform.zw;\n _texCoord0 += texcoordFrameTransform.xy;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// directional_ambient_light_shadow.frag\n// fragment shader\n//\n// Created by Zach Pomerantz on 1/18/2016.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for Shadows\n#ifdef __cplusplus\n# define MAT4 glm::mat4\n#else\n# define MAT4 mat4\n#endif\n\n#define SHADOW_CASCADE_MAX_COUNT 4\n\nstruct ShadowTransform {\n\tMAT4 reprojection;\n\tfloat fixedBias;\n float slopeBias;\n float _padding1;\n float _padding2;\n};\n\nstruct ShadowParameters {\n ShadowTransform cascades[SHADOW_CASCADE_MAX_COUNT];\n int cascadeCount;\n float invMapSize;\n float invCascadeBlendWidth;\n float maxDistance;\n float invFalloffDistance;\n};\n\n// //\nlayout(std140) uniform shadowTransformBuffer {\n\tShadowParameters shadow;\n};\n\nint getShadowCascadeCount() {\n return shadow.cascadeCount;\n}\n\nfloat getShadowCascadeInvBlendWidth() {\n return shadow.invCascadeBlendWidth;\n}\n\nfloat evalShadowFalloff(float depth) {\n return clamp((shadow.maxDistance-depth) * shadow.invFalloffDistance, 0.0, 1.0);\n}\n\nmat4 getShadowReprojection(int cascadeIndex) {\n\treturn shadow.cascades[cascadeIndex].reprojection;\n}\n\nfloat getShadowScale() {\n\treturn shadow.invMapSize;\n}\n\nfloat getShadowFixedBias(int cascadeIndex) {\n\treturn shadow.cascades[cascadeIndex].fixedBias;\n}\n\nfloat getShadowSlopeBias(int cascadeIndex) {\n\treturn shadow.cascades[cascadeIndex].slopeBias;\n}\n\n\n// Compute the texture coordinates from world coordinates\nvec4 evalShadowTexcoord(int cascadeIndex, vec4 position) {\n\tvec4 shadowCoord = getShadowReprojection(cascadeIndex) * position;\n\treturn vec4(shadowCoord.xyz, 1.0);\n}\n\nbool isShadowCascadeProjectedOnPixel(vec4 cascadeTexCoords) {\n bvec2 greaterThanZero = greaterThan(cascadeTexCoords.xy, vec2(0));\n bvec2 lessThanOne = lessThan(cascadeTexCoords.xy, vec2(1));\n return all(greaterThanZero) && all(lessThanOne);\n}\n\nint getFirstShadowCascadeOnPixel(int startCascadeIndex, vec4 worldPosition, out vec4 cascadeShadowCoords) {\n int cascadeIndex;\n startCascadeIndex = min(startCascadeIndex, getShadowCascadeCount()-1);\n for (cascadeIndex=startCascadeIndex ; cascadeIndex> 1;\n#endif\n\n // Offset for efficient PCF, see http://http.developer.nvidia.com/GPUGems/gpugems_ch11.html\n ivec2 offset = coords & ivec2(1,1);\n offset.y = (offset.x+offset.y) & 1;\n\n offsets.points[0] = shadowScale * vec3(offset + PCFkernel[0], 0.0);\n offsets.points[1] = shadowScale * vec3(offset + 
PCFkernel[1], 0.0);\n offsets.points[2] = shadowScale * vec3(offset + PCFkernel[2], 0.0);\n offsets.points[3] = shadowScale * vec3(offset + PCFkernel[3], 0.0);\n\n return offsets;\n}\n\nfloat evalShadowAttenuationPCF(int cascadeIndex, ShadowSampleOffsets offsets, vec4 shadowTexcoord, float bias) {\n shadowTexcoord.z -= bias;\n float shadowAttenuation = 0.25 * (\n fetchShadow(cascadeIndex, shadowTexcoord.xyz + offsets.points[0]) +\n fetchShadow(cascadeIndex, shadowTexcoord.xyz + offsets.points[1]) +\n fetchShadow(cascadeIndex, shadowTexcoord.xyz + offsets.points[2]) +\n fetchShadow(cascadeIndex, shadowTexcoord.xyz + offsets.points[3])\n );\n return shadowAttenuation;\n}\n\nfloat evalShadowCascadeAttenuation(int cascadeIndex, ShadowSampleOffsets offsets, vec4 shadowTexcoord, float oneMinusNdotL) {\n float bias = getShadowFixedBias(cascadeIndex) + getShadowSlopeBias(cascadeIndex) * oneMinusNdotL;\n return evalShadowAttenuationPCF(cascadeIndex, offsets, shadowTexcoord, bias);\n}\n\nfloat evalShadowAttenuation(vec3 worldLightDir, vec4 worldPosition, float viewDepth, vec3 worldNormal) {\n ShadowSampleOffsets offsets = evalShadowFilterOffsets(worldPosition);\n vec4 cascadeShadowCoords[2];\n cascadeShadowCoords[0] = vec4(0);\n cascadeShadowCoords[1] = vec4(0);\n ivec2 cascadeIndices;\n float cascadeMix = determineShadowCascadesOnPixel(worldPosition, viewDepth, cascadeShadowCoords, cascadeIndices);\n\t\n // Adjust bias if we are at a grazing angle with light\n float oneMinusNdotL = 1.0 - clamp(dot(worldLightDir, worldNormal), 0, 1);\n vec2 cascadeAttenuations = vec2(1.0, 1.0);\n cascadeAttenuations.x = evalShadowCascadeAttenuation(cascadeIndices.x, offsets, cascadeShadowCoords[0], oneMinusNdotL);\n if (cascadeMix > 0.0 && cascadeIndices.y < getShadowCascadeCount()) {\n cascadeAttenuations.y = evalShadowCascadeAttenuation(cascadeIndices.y, offsets, cascadeShadowCoords[1], oneMinusNdotL);\n }\n float attenuation = mix(cascadeAttenuations.x, cascadeAttenuations.y, cascadeMix);\n // Falloff to max distance\n return mix(1.0, attenuation, evalShadowFalloff(viewDepth));\n}\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? 
((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n\n\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\n// the albedo texture\nuniform sampler2D albedoMap;\n\n// the normal texture\nuniform sampler2D normalMap;\n\n// the specular texture\nuniform sampler2D specularMap;\n\n// the depth texture\nuniform sampler2D depthMap;\nuniform sampler2D linearZeyeMap;\n\n// the obscurance texture\nuniform sampler2D obscuranceMap;\n\n// the lighting texture\nuniform sampler2D lightingMap;\n\n\nstruct DeferredFragment {\n vec4 position;\n vec3 normal;\n float metallic;\n vec3 albedo;\n 
float obscurance;\n vec3 fresnel;\n float roughness;\n int mode;\n float scattering;\n float depthVal;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nDeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n vec4 specularVal;\n \n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n specularVal = texture(specularMap, texcoord);\n frag.obscurance = texture(obscuranceMap, texcoord).x;\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n frag.obscurance = min(specularVal.w, frag.obscurance);\n\n if (frag.mode == FRAG_MODE_SCATTERING) {\n frag.scattering = specularVal.x;\n }\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nDeferredFragment unpackDeferredFragmentNoPositionNoAmbient(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n\n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n //frag.emissive = specularVal.xyz;\n frag.obscurance = 1.0;\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 _prevView;\n mat4 _prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// 
positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\nvec4 unpackDeferredPosition(float depthValue, vec2 texcoord) {\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n\n return vec4(evalEyePositionFromZdb(side, depthValue, texcoord), 1.0);\n}\n\n// This method to unpack position is fastesst\nvec4 unpackDeferredPositionFromZdb(vec2 texcoord) {\n float Zdb = texture(depthMap, texcoord).x;\n\treturn unpackDeferredPosition(Zdb, texcoord);\n}\n\nvec4 unpackDeferredPositionFromZeye(vec2 texcoord) {\n float Zeye = -texture(linearZeyeMap, texcoord).x;\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n 
texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0);\n}\n\nDeferredFragment unpackDeferredFragment(DeferredFrameTransform deferredTransform, vec2 texcoord) {\n\n float depthValue = texture(depthMap, texcoord).r;\n\n DeferredFragment frag = unpackDeferredFragmentNoPosition(texcoord);\n\n frag.depthVal = depthValue;\n frag.position = unpackDeferredPosition(frag.depthVal, texcoord);\n\n return frag;\n}\n\n\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\n\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 
attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; 
}\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base 
* base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\n\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the 
accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\n\n\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend 
is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 evalLightmappedColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 normal, vec3 albedo, vec3 lightmap) {\n Light light = getKeyLight();\n LightAmbient ambient = getLightAmbient();\n\n // Catch normals perpendicular to the projection plane, hence the magic number for the threshold\n // It should be just 0, but we have inaccuracy so we 
overshoot\n const float PERPENDICULAR_THRESHOLD = -0.005;\n vec3 fragNormal = vec3(invViewMat * vec4(normal, 0.0)); // transform to worldspace\n float diffuseDot = dot(fragNormal, -getLightDirection(light));\n float facingLight = step(PERPENDICULAR_THRESHOLD, diffuseDot); \n\n // Reevaluate the shadow attenuation for light facing fragments\n float lightAttenuation = (1.0 - facingLight) + facingLight * shadowAttenuation;\n\n // Diffuse light is the lightmap dimmed by shadow\n vec3 diffuseLight = lightAttenuation * lightmap;\n\n // Ambient light is the lightmap when in shadow\n vec3 ambientLight = (1.0 - lightAttenuation) * lightmap * getLightAmbientIntensity(ambient);\n\n return isLightmapEnabled() * obscurance * albedo * (diffuseLight + ambientLight);\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return specularLight;\n}\n\n\nfloat curvatureAO(in float k) {\n return 1.0f - (0.0022f * k * k) + (0.0776f * k) + 0.7369f;\n}\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientSpaceLowNormal = (ambient.transform * vec4(lowNormalCurvature.xyz, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nif (scattering * isScatteringEnabled() > 0.0) {\n float ambientOcclusion = curvatureAO(lowNormalCurvature.w * 20.0f) * 0.5f;\n\n\n float ambientOcclusionHF = curvatureAO(midNormalCurvature.w * 8.0f) * 0.5f;\n ambientOcclusion = min(ambientOcclusion, ambientOcclusionHF);\n\n obscurance = min(obscurance, ambientOcclusion);\n\n // Diffuse from ambient\n diffuse = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceLowNormal).xyz;\n\n // Scattering ambient specular is the same as non scattering for now\n // TODO: we should use the same specular answer as for direct lighting\n }\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 
lowNormalCurvature\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\n// the curvature texture\nuniform sampler2D curvatureMap;\n\nvec4 fetchCurvature(vec2 texcoord) {\n return texture(curvatureMap, texcoord);\n}\n\n// the curvature texture\nuniform sampler2D diffusedCurvatureMap;\n\nvec4 fetchDiffusedCurvature(vec2 texcoord) {\n return texture(diffusedCurvatureMap, texcoord);\n}\n\nvoid unpackMidLowNormalCurvature(vec2 texcoord, out vec4 midNormalCurvature, out vec4 lowNormalCurvature) {\n midNormalCurvature = fetchCurvature(texcoord);\n lowNormalCurvature = fetchDiffusedCurvature(texcoord);\n midNormalCurvature.xyz = normalize((midNormalCurvature.xyz - 0.5f) * 2.0f);\n lowNormalCurvature.xyz = normalize((lowNormalCurvature.xyz - 0.5f) * 2.0f);\n midNormalCurvature.w = (midNormalCurvature.w * 2.0 - 1.0);\n lowNormalCurvature.w = (lowNormalCurvature.w * 2.0 - 1.0);\n}\n\nvec3 evalAmbientSphereGlobalColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal,\nvec3 albedo, vec3 fresnel, float metallic, float roughness\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normal);\n vec3 fragPositionWS = vec3(invViewMat * vec4(position, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n color += ambientDiffuse;\n color += ambientSpecular;\n\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n color += directionalDiffuse;\n color += directionalSpecular;\n\n return color;\n}\n\n\n\nlayout(location = 0) in vec2 _texCoord0;\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n DeferredFrameTransform deferredTransform = getDeferredFrameTransform();\n DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0);\n\n vec4 viewPos = vec4(frag.position.xyz, 1.0);\n vec4 worldPos = getViewInverse() * viewPos;\n Light shadowLight = getKeyLight();\n vec3 worldLightDirection = getLightDirection(shadowLight);\n float shadowAttenuation = evalShadowAttenuation(worldLightDirection, worldPos, -viewPos.z, frag.normal);\n\n if (frag.mode == FRAG_MODE_UNLIT) {\n discard;\n } else if (frag.mode == FRAG_MODE_LIGHTMAPPED) {\n discard;\n } else {\n vec4 midNormalCurvature = vec4(0);\n vec4 lowNormalCurvature = vec4(0);\n if (frag.mode == 
FRAG_MODE_SCATTERING) {\n unpackMidLowNormalCurvature(_texCoord0, midNormalCurvature, lowNormalCurvature);\n }\n vec3 color = evalAmbientSphereGlobalColor(\n getViewInverse(),\n shadowAttenuation,\n frag.obscurance,\n frag.position.xyz,\n frag.normal,\n frag.albedo,\n frag.fresnel,\n frag.metallic,\n frag.roughness,\n frag.scattering,\n midNormalCurvature,\n lowNormalCurvature);\n \n _fragColor = vec4(color, 1.0);\n }\n}\n\n\n"
+ },
+ "QolYhWOxFZg7j6BuKVTkNA==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 04/24/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec4 _positionWS;\nlayout(location = 2) out vec2 _texCoord0;\nlayout(location = 3) out vec2 _texCoord1;\nlayout(location = 4) out vec3 _normalWS;\nlayout(location = 5) out vec3 _color;\nlayout(location = 6) out float _alpha;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = 
vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n// model_translucent_fade.frag\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 
directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a 
bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n\n\n 
return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float 
smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return 
specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in 
world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\nuniform lightBuffer {\n Light lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = 
vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return 
frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? 
-eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? 
(element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n\n\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float 
radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 
_textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\n\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec4 _positionWS;\nin vec3 _normalWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = 
fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragPositionES = _positionES.xyz;\n vec3 fragPositionWS = _positionWS.xyz;\n // Lighting is done in world space\n vec3 fragNormalWS = normalize(_normalWS);\n\n TransformCamera cam = getTransformCamera();\n vec3 fragToEyeWS = cam._viewInverse[3].xyz - fragPositionWS;\n vec3 fragToEyeDirWS = normalize(fragToEyeWS);\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragToEyeDirWS);\n\n vec4 localLighting = vec4(0.0);\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(_positionWS);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (hasLocalLights(numLights, clusterPos, dims)) {\n localLighting = evalLocalLighting(cluster, numLights, fragPositionWS, surfaceWS,\n metallic, fresnel, albedo, 0.0,\n vec4(0), vec4(0), opacity);\n }\n\n _fragColor = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPositionES,\n fragPositionWS,\n albedo,\n fresnel,\n metallic,\n emissive + fadeEmissive,\n surfaceWS, opacity, localLighting.rgb),\n opacity);\n}\n\n\n"
+ },
+ "RBD7sDmFLNuS6SYIBsx+hQ==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// deferred_light.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/16/16.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec2 _texCoord0;\n\nuniform vec4 texcoordFrameTransform;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n _texCoord0 = (pos.xy + 1.0) * 0.5;\n\n _texCoord0 *= texcoordFrameTransform.zw;\n _texCoord0 += texcoordFrameTransform.xy;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// directional_ambient_light.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 9/3/14.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * 
FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\n// the albedo texture\nuniform sampler2D albedoMap;\n\n// the normal texture\nuniform sampler2D normalMap;\n\n// the specular texture\nuniform sampler2D specularMap;\n\n// the depth texture\nuniform sampler2D depthMap;\nuniform sampler2D linearZeyeMap;\n\n// the obscurance texture\nuniform sampler2D obscuranceMap;\n\n// the lighting texture\nuniform sampler2D lightingMap;\n\n\nstruct DeferredFragment {\n vec4 position;\n vec3 normal;\n float metallic;\n vec3 albedo;\n float obscurance;\n vec3 fresnel;\n float roughness;\n int mode;\n float scattering;\n float depthVal;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nDeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n vec4 specularVal;\n \n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n specularVal = texture(specularMap, texcoord);\n frag.obscurance = texture(obscuranceMap, texcoord).x;\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n frag.obscurance = min(specularVal.w, frag.obscurance);\n\n if (frag.mode == FRAG_MODE_SCATTERING) {\n frag.scattering = specularVal.x;\n }\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nDeferredFragment unpackDeferredFragmentNoPositionNoAmbient(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n\n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n //frag.emissive = specularVal.xyz;\n frag.obscurance = 1.0;\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 _prevView;\n mat4 
_prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? 
ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n\n\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\nvec4 unpackDeferredPosition(float depthValue, vec2 texcoord) {\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n\n return vec4(evalEyePositionFromZdb(side, depthValue, texcoord), 1.0);\n}\n\n// This method to unpack position is fastesst\nvec4 unpackDeferredPositionFromZdb(vec2 texcoord) {\n float Zdb = texture(depthMap, texcoord).x;\n\treturn unpackDeferredPosition(Zdb, texcoord);\n}\n\nvec4 unpackDeferredPositionFromZeye(vec2 texcoord) {\n float Zeye = -texture(linearZeyeMap, texcoord).x;\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0);\n}\n\nDeferredFragment unpackDeferredFragment(DeferredFrameTransform deferredTransform, vec2 texcoord) {\n\n float depthValue = texture(depthMap, texcoord).r;\n\n DeferredFragment frag = unpackDeferredFragmentNoPosition(texcoord);\n\n frag.depthVal = depthValue;\n frag.position = unpackDeferredPosition(frag.depthVal, texcoord);\n\n return frag;\n}\n\n\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat 
lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, 
lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n 
return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n\n\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular 
Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n\n\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 evalLightmappedColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 normal, vec3 albedo, vec3 lightmap) {\n Light light = getKeyLight();\n LightAmbient ambient = getLightAmbient();\n\n\n\n // Catch normals perpendicular to the projection plane, hence the magic number for the threshold\n // It should be just 0, but we have inaccuracy so we overshoot\n const float PERPENDICULAR_THRESHOLD = -0.005;\n vec3 fragNormal = vec3(invViewMat * vec4(normal, 0.0)); // transform to worldspace\n float diffuseDot = dot(fragNormal, -getLightDirection(light));\n float facingLight = step(PERPENDICULAR_THRESHOLD, diffuseDot); \n\n // Reevaluate the shadow attenuation for light facing fragments\n float lightAttenuation = (1.0 - facingLight) + facingLight * shadowAttenuation;\n\n // Diffuse light is the lightmap dimmed by shadow\n vec3 diffuseLight = lightAttenuation * lightmap;\n\n // Ambient light is the lightmap when in shadow\n vec3 ambientLight = (1.0 - lightAttenuation) * lightmap * getLightAmbientIntensity(ambient);\n\n return isLightmapEnabled() * obscurance * albedo * (diffuseLight + ambientLight);\n}\n\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n 
float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return specularLight;\n}\n\n\nfloat curvatureAO(in float k) {\n return 1.0f - (0.0022f * k * k) + (0.0776f * k) + 0.7369f;\n}\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientSpaceLowNormal = (ambient.transform * vec4(lowNormalCurvature.xyz, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nif (scattering * isScatteringEnabled() > 0.0) {\n float ambientOcclusion = curvatureAO(lowNormalCurvature.w * 20.0f) * 0.5f;\n float ambientOcclusionHF = curvatureAO(midNormalCurvature.w * 8.0f) * 0.5f;\n ambientOcclusion = min(ambientOcclusion, ambientOcclusionHF);\n\n obscurance = min(obscurance, ambientOcclusion);\n\n // Diffuse from ambient\n diffuse = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceLowNormal).xyz;\n\n // Scattering ambient specular is the same as non scattering for now\n // TODO: we should use the same specular answer as for direct lighting\n }\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\n// the curvature texture\nuniform sampler2D curvatureMap;\n\nvec4 fetchCurvature(vec2 texcoord) {\n return texture(curvatureMap, texcoord);\n}\n\n// the curvature texture\nuniform sampler2D diffusedCurvatureMap;\n\nvec4 fetchDiffusedCurvature(vec2 texcoord) {\n return texture(diffusedCurvatureMap, texcoord);\n}\n\nvoid unpackMidLowNormalCurvature(vec2 texcoord, out vec4 midNormalCurvature, out vec4 lowNormalCurvature) {\n 
midNormalCurvature = fetchCurvature(texcoord);\n lowNormalCurvature = fetchDiffusedCurvature(texcoord);\n midNormalCurvature.xyz = normalize((midNormalCurvature.xyz - 0.5f) * 2.0f);\n lowNormalCurvature.xyz = normalize((lowNormalCurvature.xyz - 0.5f) * 2.0f);\n midNormalCurvature.w = (midNormalCurvature.w * 2.0 - 1.0);\n lowNormalCurvature.w = (lowNormalCurvature.w * 2.0 - 1.0);\n}\n\nvec3 evalAmbientSphereGlobalColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal,\nvec3 albedo, vec3 fresnel, float metallic, float roughness\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normal);\n vec3 fragPositionWS = vec3(invViewMat * vec4(position, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n color += ambientDiffuse;\n color += ambientSpecular;\n\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n color += directionalDiffuse;\n color += directionalSpecular;\n\n return color;\n}\n\n\n\n\nlayout(location = 0) in vec2 _texCoord0;\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n DeferredFrameTransform deferredTransform = getDeferredFrameTransform();\n DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0);\n\n float shadowAttenuation = 1.0;\n\n if (frag.mode == FRAG_MODE_UNLIT) {\n discard;\n } else if (frag.mode == FRAG_MODE_LIGHTMAPPED) {\n discard;\n } else {\n vec4 midNormalCurvature = vec4(0);\n vec4 lowNormalCurvature = vec4(0);\n if (frag.mode == FRAG_MODE_SCATTERING) {\n unpackMidLowNormalCurvature(_texCoord0, midNormalCurvature, lowNormalCurvature);\n }\n\n vec3 color = evalAmbientSphereGlobalColor(\n getViewInverse(),\n shadowAttenuation,\n frag.obscurance,\n frag.position.xyz,\n frag.normal,\n frag.albedo,\n frag.fresnel,\n frag.metallic,\n frag.roughness,\n frag.scattering,\n midNormalCurvature,\n lowNormalCurvature);\n _fragColor = vec4(color, 1.0);\n\n }\n}\n\n\n"
+ },
+ "RjdepXqRdX+qe5lSXqd2qA==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// deferred_light.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/16/16.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec2 _texCoord0;\n\nuniform vec4 texcoordFrameTransform;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n _texCoord0 = (pos.xy + 1.0) * 0.5;\n\n _texCoord0 *= texcoordFrameTransform.zw;\n _texCoord0 += texcoordFrameTransform.xy;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// local_lights_shading.frag\n// fragment shader\n//\n// Created by Sam Gateau on 9/6/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about deferred buffer\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - 
FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\n// the albedo texture\nuniform sampler2D albedoMap;\n\n// the normal texture\nuniform sampler2D normalMap;\n\n// the specular texture\nuniform sampler2D specularMap;\n\n// the depth texture\nuniform sampler2D depthMap;\nuniform sampler2D linearZeyeMap;\n\n// the obscurance texture\nuniform sampler2D obscuranceMap;\n\n// the lighting texture\nuniform sampler2D lightingMap;\n\n\nstruct DeferredFragment {\n vec4 position;\n vec3 normal;\n float metallic;\n vec3 albedo;\n float obscurance;\n vec3 fresnel;\n float roughness;\n int mode;\n float scattering;\n float depthVal;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nDeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n vec4 specularVal;\n \n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n specularVal = texture(specularMap, texcoord);\n frag.obscurance = texture(obscuranceMap, texcoord).x;\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n frag.obscurance = min(specularVal.w, frag.obscurance);\n\n if (frag.mode == FRAG_MODE_SCATTERING) {\n frag.scattering = specularVal.x;\n }\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nDeferredFragment unpackDeferredFragmentNoPositionNoAmbient(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n\n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n //frag.emissive = specularVal.xyz;\n frag.obscurance = 1.0;\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 
_prevView;\n mat4 _prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? 
ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\n\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\nvec4 unpackDeferredPosition(float depthValue, vec2 texcoord) {\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n\n return vec4(evalEyePositionFromZdb(side, depthValue, texcoord), 1.0);\n}\n\n// This method to unpack position is fastesst\nvec4 unpackDeferredPositionFromZdb(vec2 texcoord) {\n float Zdb = texture(depthMap, texcoord).x;\n\treturn unpackDeferredPosition(Zdb, texcoord);\n}\n\nvec4 unpackDeferredPositionFromZeye(vec2 texcoord) {\n float Zeye = -texture(linearZeyeMap, texcoord).x;\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0);\n}\n\nDeferredFragment unpackDeferredFragment(DeferredFrameTransform deferredTransform, vec2 texcoord) {\n\n float depthValue = texture(depthMap, texcoord).r;\n\n DeferredFragment frag = unpackDeferredFragmentNoPosition(texcoord);\n\n frag.depthVal = depthValue;\n frag.position = unpackDeferredPosition(frag.depthVal, texcoord);\n\n return frag;\n}\n\n\n\n// the curvature texture\nuniform sampler2D curvatureMap;\n\nvec4 fetchCurvature(vec2 texcoord) {\n return texture(curvatureMap, texcoord);\n}\n\n// the curvature texture\nuniform sampler2D diffusedCurvatureMap;\n\nvec4 fetchDiffusedCurvature(vec2 texcoord) {\n return texture(diffusedCurvatureMap, texcoord);\n}\n\nvoid unpackMidLowNormalCurvature(vec2 texcoord, out vec4 midNormalCurvature, out vec4 lowNormalCurvature) {\n midNormalCurvature = fetchCurvature(texcoord);\n lowNormalCurvature = fetchDiffusedCurvature(texcoord);\n midNormalCurvature.xyz = normalize((midNormalCurvature.xyz - 0.5f) * 2.0f);\n lowNormalCurvature.xyz = normalize((lowNormalCurvature.xyz - 0.5f) * 2.0f);\n midNormalCurvature.w = (midNormalCurvature.w * 2.0 - 1.0);\n lowNormalCurvature.w = (lowNormalCurvature.w * 2.0 - 1.0);\n}\n\n\n// 
Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 
lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nuniform lightBuffer {\n\n\n Light 
lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return 
vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n\n\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n 
updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ 
compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n\n\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > 
-frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? -eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? 
(element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float 
radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}in vec2 _texCoord0;\nout vec4 _fragColor;\n\nvoid main(void) {\n _fragColor = vec4(0.0);\n\n // Grab the fragment data from the uv\n vec2 texCoord = _texCoord0.st;\n\n DeferredFrameTransform deferredTransform = getDeferredFrameTransform();\n\n\n DeferredFragment frag = unpackDeferredFragment(deferredTransform, texCoord);\n vec4 fragPosition = frag.position;\n\n if (frag.mode == FRAG_MODE_UNLIT) {\n discard;\n }\n\n // Frag pos in world\n mat4 invViewMat = getViewInverse();\n vec4 fragWorldPos = invViewMat * fragPosition;\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(fragWorldPos);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (!hasLocalLights(numLights, clusterPos, dims)) {\n discard;\n }\n\n vec4 midNormalCurvature = vec4(0);\n vec4 lowNormalCurvature = vec4(0);\n if (frag.mode == FRAG_MODE_SCATTERING) {\n unpackMidLowNormalCurvature(texCoord, midNormalCurvature, lowNormalCurvature);\n }\n\n\n // Frag to eye vec\n vec4 fragEyeVector = invViewMat * vec4(-frag.position.xyz, 0.0);\n vec3 fragEyeDir = normalize(fragEyeVector.xyz);\n SurfaceData surface = initSurfaceData(frag.roughness, frag.normal, fragEyeDir);\n\n _fragColor = evalLocalLighting(cluster, numLights, fragWorldPos.xyz, surface, \n frag.metallic, frag.fresnel, frag.albedo, frag.scattering, \n midNormalCurvature, lowNormalCurvature, 1.0);\n\n}\n\n\n\n"
+ },
+ "RvmQNF6dKH4aQ6NwzXCEkA==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawViewportQuatTransformTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] filling in \n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = 
texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, 0.0, 1.0),\n vec4(1.0, -1.0, 0.0, 1.0),\n vec4(-1.0, 1.0, 0.0, 1.0),\n vec4(1.0, 1.0, 0.0, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n // standard transform but applied to the Texcoord\n vec4 tc = vec4((pos.xy + 1.0) * 0.5, pos.zw);\n\n TransformObject obj = getTransformObject();\n { // transformModelToWorldPos\n tc = (obj._model * tc);\n }\n\n\n gl_Position = pos;\n varTexCoord0 = tc.xy;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// velocityBuffer_cameraMotion.frag\n//\n// Created by Sam Gateau on 6/3/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 _prevView;\n mat4 _prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform 
getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\nin vec2 varTexCoord0;\nout vec4 outFragColor;\n\nuniform sampler2D depthMap;\n\n\nvoid main(void) {\n // Pixel being shaded\n ivec2 pixelPos;\n vec2 texcoordPos;\n ivec4 stereoSide;\n ivec2 framePixelPos = getPixelPosTexcoordPosAndSide(gl_FragCoord.xy, pixelPos, texcoordPos, stereoSide);\n \n\tfloat Zdb = texelFetch(depthMap, ivec2(gl_FragCoord.xy), 0).x;\n\n\t// The position of the pixel fragment in Eye space then in world space\n vec3 eyePos = evalUnjitteredEyePositionFromZdb(stereoSide.x, Zdb, texcoordPos);\n\tvec3 worldPos = (getViewInverse() * vec4(eyePos, 1.0)).xyz;\n \n vec3 prevEyePos = (getPreviousView() * vec4(worldPos, 1.0)).xyz;\n vec4 prevClipPos = (getUnjitteredProjection(stereoSide.x) * vec4(prevEyePos, 1.0));\n vec2 prevUV = 0.5 * (prevClipPos.xy / prevClipPos.w) + vec2(0.5);\n\n //vec2 imageSize = getWidthHeight(0);\n vec2 imageSize = vec2(1.0, 1.0);\n outFragColor = vec4( ((texcoordPos - prevUV) * imageSize), 0.0, 0.0);\n}\n\n\n"
+ },
+ "U/JRfxWikaERXEtfd3D7iw==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// skin_model_shadow_fade_dq.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer 
transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 
4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n}\n\n\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = 
length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n skinPosition(inSkinClusterIndex, inSkinClusterWeight, inPosition, position);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// skin_model_shadow_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/08/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nin vec4 _positionWS;\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n 
applyFadeClip(fadeParams, _positionWS.xyz);\n\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
+ "UtSPZV6j2V+sfW5h8VKzdQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// taa.frag\n// fragment shader\n//\n// Created by Sam Gateau on 8/14/2017\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// TAA.slh\n// Common component needed by TemporalAntialiasing fragment shader\n//\n// Created by Sam Gateau on 8/17/2017\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 _prevView;\n mat4 _prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * 
frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? 
value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\nuniform sampler2D depthMap;\nuniform sampler2D sourceMap;\nuniform sampler2D historyMap;\nuniform sampler2D velocityMap;\nuniform sampler2D nextMap;\n\nstruct TAAParams\n{\n\tfloat none;\n\tfloat blend;\n\tfloat covarianceGamma;\n\tfloat debugShowVelocityThreshold;\n ivec4 flags;\n vec4 pixelInfo_orbZoom;\n vec4 regionInfo;\n};\n\nlayout(std140) uniform taaParamsBuffer {\n TAAParams params;\n};\n\n#define GET_BIT(bitfield, bitIndex) bool((bitfield) & (1 << (bitIndex)))\n\nbool taa_isDebugEnabled() {\n return GET_BIT(params.flags.x, 0);\n}\n\nbool taa_showDebugCursor() {\n return GET_BIT(params.flags.x, 1);\n}\n\nbool taa_showClosestFragment() {\n return GET_BIT(params.flags.x, 3);\n}\n\nbool taa_constrainColor() {\n return GET_BIT(params.flags.y, 1);\n}\n\nbool taa_feedbackColor() {\n return GET_BIT(params.flags.y, 4);\n}\n\nvec2 taa_getDebugCursorTexcoord() {\n return params.pixelInfo_orbZoom.xy;\n}\n\nfloat taa_getDebugOrbZoom() {\n return params.pixelInfo_orbZoom.z;\n}\n\nvec2 taa_getRegionDebug() {\n return params.regionInfo.xy;\n}\n\nvec2 taa_getRegionFXAA() {\n return params.regionInfo.zw;\n}\n#define USE_YCOCG 1\n\nvec4 taa_fetchColor(sampler2D map, vec2 uv) {\n\tvec4 c = texture(map, uv);\n\t// Apply rapid pseudo tonemapping as TAA is applied to a tonemapped image, using luminance as weight, as proposed in\n\t// https://de45xmedrsdbp.cloudfront.net/Resources/files/TemporalAA_small-59732822.pdf\n\tfloat lum = dot(vec3(0.3,0.5,0.2),c.rgb);\n\tc.rgb = c.rgb / (1.0+lum);\n#if USE_YCOCG\n\treturn vec4(color_LinearToYCoCg(c.rgb), c.a);\n#else\n\treturn c;\n#endif\n}\n\nvec3 taa_resolveColor(vec3 color) {\n#if USE_YCOCG\n\tcolor = max(vec3(0), color_YCoCgToUnclampedLinear(color));\n#endif\n\t// Apply rapid inverse tonemapping, using luminance as weight, as proposed in\n\t// https://de45xmedrsdbp.cloudfront.net/Resources/files/TemporalAA_small-59732822.pdf\n\tfloat lum = dot(vec3(0.3,0.5,0.2),color.rgb);\n\tcolor = color / (1.0-lum);\n\treturn color;\n}\n\nvec4 taa_fetchSourceMap(vec2 uv) {\n\treturn taa_fetchColor(sourceMap, uv);\n}\n\nvec4 taa_fetchHistoryMap(vec2 uv) {\n\treturn taa_fetchColor(historyMap, uv);\n}\n\nvec4 taa_fetchNextMap(vec2 uv) {\n\treturn taa_fetchColor(nextMap, uv);\n}\n\nvec2 taa_fetchVelocityMap(vec2 uv) {\n\treturn texture(velocityMap, uv).xy;\n}\n\nfloat taa_fetchDepth(vec2 uv) {\n\treturn -texture(depthMap, vec2(uv), 0).x;\n}\n\n\n#define ZCMP_GT(a, b) (a > b)\n\nvec2 taa_getImageSize() {\n vec2 imageSize = getWidthHeight(0);\n if (isStereo()) {\n imageSize.x *= 2.0;\n }\n return imageSize;\n}\n\nvec2 taa_getTexelSize() {\n vec2 texelSize = 
getInvWidthHeight();\n if (isStereo()) {\n texelSize.x *= 0.5;\n }\n return texelSize;\n}\n\nvec3 taa_findClosestFragment3x3(vec2 uv)\n{\n\tvec2 dd = abs(taa_getTexelSize());\n\tvec2 du = vec2(dd.x, 0.0);\n\tvec2 dv = vec2(0.0, dd.y);\n\n\tvec3 dtl = vec3(-1, -1, taa_fetchDepth(uv - dv - du));\n\tvec3 dtc = vec3( 0, -1, taa_fetchDepth(uv - dv));\n\tvec3 dtr = vec3( 1, -1, taa_fetchDepth(uv - dv + du));\n\n\tvec3 dml = vec3(-1, 0, taa_fetchDepth(uv - du));\n\tvec3 dmc = vec3( 0, 0, taa_fetchDepth(uv));\n\tvec3 dmr = vec3( 1, 0, taa_fetchDepth(uv + du));\n\n\tvec3 dbl = vec3(-1, 1, taa_fetchDepth(uv + dv - du));\n\tvec3 dbc = vec3( 0, 1, taa_fetchDepth(uv + dv));\n\tvec3 dbr = vec3( 1, 1, taa_fetchDepth(uv + dv + du));\n\n\tvec3 dmin = dtl;\n\tif (ZCMP_GT(dmin.z, dtc.z)) dmin = dtc;\n\tif (ZCMP_GT(dmin.z, dtr.z)) dmin = dtr;\n\n\tif (ZCMP_GT(dmin.z, dml.z)) dmin = dml;\n\tif (ZCMP_GT(dmin.z, dmc.z)) dmin = dmc;\n\tif (ZCMP_GT(dmin.z, dmr.z)) dmin = dmr;\n\n\tif (ZCMP_GT(dmin.z, dbl.z)) dmin = dbl;\n\tif (ZCMP_GT(dmin.z, dbc.z)) dmin = dbc;\n\n\n\tif (ZCMP_GT(dmin.z, dbr.z)) dmin = dbr;\n\n\treturn vec3(uv + dd.xy * dmin.xy, dmin.z);\n}\n\nvec2 taa_fetchVelocityMapBest(vec2 uv) {\n vec2 dd = abs(taa_getTexelSize());\n vec2 du = vec2(dd.x, 0.0);\n vec2 dv = vec2(0.0, dd.y);\n\n vec2 dtl = taa_fetchVelocityMap(uv - dv - du);\n vec2 dtc = taa_fetchVelocityMap(uv - dv);\n vec2 dtr = taa_fetchVelocityMap(uv - dv + du);\n\n vec2 dml = taa_fetchVelocityMap(uv - du);\n vec2 dmc = taa_fetchVelocityMap(uv);\n vec2 dmr = taa_fetchVelocityMap(uv + du);\n\n vec2 dbl = taa_fetchVelocityMap(uv + dv - du);\n vec2 dbc = taa_fetchVelocityMap(uv + dv);\n vec2 dbr = taa_fetchVelocityMap(uv + dv + du);\n\n vec3 best = vec3(dtl, dot(dtl,dtl));\n\n float testSpeed = dot(dtc,dtc);\n if (testSpeed > best.z) { best = vec3(dtc, testSpeed); }\n testSpeed = dot(dtr,dtr);\n if (testSpeed > best.z) { best = vec3(dtr, testSpeed); }\n\n testSpeed = dot(dml,dml);\n if (testSpeed > best.z) { best = vec3(dml, testSpeed); }\n testSpeed = dot(dmc,dmc);\n if (testSpeed > best.z) { best = vec3(dmc, testSpeed); }\n testSpeed = dot(dmr,dmr);\n if (testSpeed > best.z) { best = vec3(dmr, testSpeed); }\n\n testSpeed = dot(dbl,dbl);\n if (testSpeed > best.z) { best = vec3(dbl, testSpeed); }\n testSpeed = dot(dbc,dbc);\n if (testSpeed > best.z) { best = vec3(dbc, testSpeed); }\n testSpeed = dot(dbr,dbr);\n if (testSpeed > best.z) { best = vec3(dbr, testSpeed); }\n\n return best.xy;\n}\n\nvec2 taa_fromFragUVToEyeUVAndSide(vec2 fragUV, out int stereoSide) {\n vec2 eyeUV = fragUV;\n stereoSide = 0;\n if (isStereo()) {\n if (eyeUV.x > 0.5) {\n eyeUV.x -= 0.5;\n stereoSide = 1;\n }\n eyeUV.x *= 2.0;\n }\n return eyeUV;\n}\n\nvec2 taa_fromEyeUVToFragUV(vec2 eyeUV, int stereoSide) {\n vec2 fragUV = eyeUV;\n if (isStereo()) {\n fragUV.x *= 0.5;\n fragUV.x += stereoSide*0.5;\n }\n return fragUV;\n}\n\nvec2 taa_computePrevFragAndEyeUV(vec2 fragUV, vec2 fragVelocity, out vec2 prevEyeUV) {\n int stereoSide = 0;\n vec2 eyeUV = taa_fromFragUVToEyeUVAndSide(fragUV, stereoSide);\n prevEyeUV = eyeUV - fragVelocity;\n return taa_fromEyeUVToFragUV(prevEyeUV, stereoSide);\n}\n\nvec2 taa_fetchSourceAndHistory(vec2 fragUV, vec2 fragVelocity, out vec3 sourceColor, out vec3 historyColor) {\n vec2 prevEyeUV;\n vec2 prevFragUV = taa_computePrevFragAndEyeUV(fragUV, fragVelocity, prevEyeUV);\n sourceColor = taa_fetchSourceMap(fragUV).xyz;\n\n historyColor = sourceColor;\n if (!(any(lessThan(prevEyeUV, vec2(0.0))) || any(greaterThan(prevEyeUV, vec2(1.0))))) {\n 
historyColor = taa_fetchHistoryMap(prevFragUV).xyz;\n }\n return prevFragUV;\n}\n\nfloat Luminance(vec3 rgb) {\n return rgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0;\n}\n\n#define MINMAX_3X3_ROUNDED 1\n\nmat3 taa_evalNeighbourColorVariance(vec3 sourceColor, vec2 fragUV, vec2 fragVelocity) {\n vec2 texelSize = taa_getTexelSize();\n \n\n\tvec2 du = vec2(texelSize.x, 0.0);\n\tvec2 dv = vec2(0.0, texelSize.y);\n\n vec3 sampleColor = taa_fetchSourceMap(fragUV - dv - du).rgb;\n vec3 sumSamples = sampleColor;\n vec3 sumSamples2 = sampleColor * sampleColor;\n\n sampleColor = taa_fetchSourceMap(fragUV - dv).rgb;\n sumSamples += sampleColor;\n sumSamples2 += sampleColor * sampleColor;\n\n sampleColor = taa_fetchSourceMap(fragUV - dv + du).rgb;\n sumSamples += sampleColor;\n sumSamples2 += sampleColor * sampleColor;\n\n sampleColor = taa_fetchSourceMap(fragUV - du).rgb;\n sumSamples += sampleColor;\n sumSamples2 += sampleColor * sampleColor;\n\n sampleColor = sourceColor; //taa_fetchSourceMap(fragUV).rgb; // could resuse the same osurce sampleColor isn't it ?\n sumSamples += sampleColor;\n sumSamples2 += sampleColor * sampleColor;\n\n sampleColor = taa_fetchSourceMap(fragUV + du).rgb;\n sumSamples += sampleColor;\n sumSamples2 += sampleColor * sampleColor;\n\n sampleColor = taa_fetchSourceMap(fragUV + dv - du).rgb;\n sumSamples += sampleColor;\n sumSamples2 += sampleColor * sampleColor;\n\n sampleColor = taa_fetchSourceMap(fragUV + dv).rgb;\n sumSamples += sampleColor;\n sumSamples2 += sampleColor * sampleColor;\n \n sampleColor = taa_fetchSourceMap(fragUV + dv + du).rgb;\n sumSamples += sampleColor;\n sumSamples2 += sampleColor * sampleColor;\n\n \n vec3 mu = sumSamples / vec3(9.0);\n vec3 sigma = sqrt(max(sumSamples2 / vec3(9.0) - mu * mu, vec3(0.0)));\n \n float gamma = params.covarianceGamma;\n vec3 cmin = mu - gamma * sigma;\n vec3 cmax = mu + gamma * sigma;\n\n return mat3(cmin, cmax, mu);\n}\n\nmat3 taa_evalNeighbourColorRegion(vec3 sourceColor, vec2 fragUV, vec2 fragVelocity, float fragZe) {\n vec2 imageSize = taa_getImageSize();\n vec2 texelSize = taa_getTexelSize();\n vec3 cmin, cmax, cavg;\n\n #if MINMAX_3X3_ROUNDED\n\t\tvec2 du = vec2(texelSize.x, 0.0);\n\t\tvec2 dv = vec2(0.0, texelSize.y);\n\n\t\tvec3 ctl = taa_fetchSourceMap(fragUV - dv - du).rgb;\n\t\tvec3 ctc = taa_fetchSourceMap(fragUV - dv).rgb;\n\t\tvec3 ctr = taa_fetchSourceMap(fragUV - dv + du).rgb;\n\t\tvec3 cml = taa_fetchSourceMap(fragUV - du).rgb;\n\t\tvec3 cmc = sourceColor; //taa_fetchSourceMap(fragUV).rgb; // could resuse the same osurce sample isn't it ?\n\t\tvec3 cmr = taa_fetchSourceMap(fragUV + du).rgb;\n\t\tvec3 cbl = taa_fetchSourceMap(fragUV + dv - du).rgb;\n\t\tvec3 cbc = taa_fetchSourceMap(fragUV + dv).rgb;\n\t\tvec3 cbr = taa_fetchSourceMap(fragUV + dv + du).rgb;\n\n\t\tcmin = min(ctl, min(ctc, min(ctr, min(cml, min(cmc, min(cmr, min(cbl, min(cbc, cbr))))))));\n\t\tcmax = max(ctl, max(ctc, max(ctr, max(cml, max(cmc, max(cmr, max(cbl, max(cbc, cbr))))))));\n\n\t\t#if MINMAX_3X3_ROUNDED || USE_YCOCG || USE_CLIPPING\n\t\t\tcavg = (ctl + ctc + ctr + cml + cmc + cmr + cbl + cbc + cbr) / 9.0;\n #elif\n cavg = (cmin + cmax ) * 0.5;\n\t\t#endif\n\n\t\t#if MINMAX_3X3_ROUNDED\n\t\t\tvec3 cmin5 = min(ctc, min(cml, min(cmc, min(cmr, cbc))));\n\t\t\tvec3 cmax5 = max(ctc, max(cml, max(cmc, max(cmr, cbc))));\n\t\t\tvec3 cavg5 = (ctc + cml + cmc + cmr + cbc) / 5.0;\n\t\t\tcmin = 0.5 * (cmin + cmin5);\n\t\t\tcmax = 0.5 * (cmax + cmax5);\n\t\t\tcavg = 0.5 * (cavg + cavg5);\n\t\t#endif\n #else\n\t\tconst float _SubpixelThreshold = 
0.5;\n\t\tconst float _GatherBase = 0.5;\n\t\tconst float _GatherSubpixelMotion = 0.1666;\n\n\t\tvec2 texel_vel = fragVelocity * imageSize;\n\t\tfloat texel_vel_mag = length(texel_vel) * -fragZe;\n\t\tfloat k_subpixel_motion = clamp(_SubpixelThreshold / (0.0001 + texel_vel_mag), 0.0, 1.0);\n\t\tfloat k_min_max_support = _GatherBase + _GatherSubpixelMotion * k_subpixel_motion;\n\n\t\tvec2 ss_offset01 = k_min_max_support * vec2(-texelSize.x, texelSize.y);\n\t\tvec2 ss_offset11 = k_min_max_support * vec2(texelSize.x, texelSize.y);\n\t\tvec3 c00 = taa_fetchSourceMap(fragUV - ss_offset11).rgb;\n\t\tvec3 c10 = taa_fetchSourceMap(fragUV - ss_offset01).rgb;\n\t\tvec3 c01 = taa_fetchSourceMap(fragUV + ss_offset01).rgb;\n\t\tvec3 c11 = taa_fetchSourceMap(fragUV + ss_offset11).rgb;\n\n\t\tcmin = min(c00, min(c10, min(c01, c11)));\n\t\tcmax = max(c00, max(c10, max(c01, c11)));\n cavg = (cmin + cmax ) * 0.5;\n\n\t\t#if USE_YCOCG || USE_CLIPPING\n\t\t\tcavg = (c00 + c10 + c01 + c11) / 4.0;\n #elif\n cavg = (cmin + cmax ) * 0.5;\n\t\t#endif\n #endif\n\n \t\t// shrink chroma min-max\n\t#if USE_YCOCG\n\t\tvec2 chroma_extent = vec2(0.25 * 0.5 * (cmax.r - cmin.r));\n\t\tvec2 chroma_center = sourceColor.gb;\n\t\tcmin.yz = chroma_center - chroma_extent;\n\t\tcmax.yz = chroma_center + chroma_extent;\n\t\tcavg.yz = chroma_center;\n\t#endif\n\n return mat3(cmin, cmax, cavg);\n}\n\n//#define USE_OPTIMIZATIONS 0\n\nvec3 taa_clampColor(vec3 colorMin, vec3 colorMax, vec3 colorSource, vec3 color) {\n\tconst float eps = 0.00001;\n vec3 p = colorSource;\n vec3 q = color;\n\t// note: only clips towards aabb center (but fast!)\n\tvec3 p_clip = 0.5 * (colorMax + colorMin);\n\tvec3 e_clip = 0.5 * (colorMax - colorMin) + vec3(eps);\n\n\tvec3 v_clip = q - p_clip;\n\tvec3 v_unit = v_clip.xyz / e_clip;\n\tvec3 a_unit = abs(v_unit);\n\tfloat ma_unit = max(a_unit.x, max(a_unit.y, a_unit.z));\n\n\tif (ma_unit > 1.0)\n\t\treturn p_clip + v_clip / ma_unit;\n\telse\n\t\treturn q;// point inside aabb\t\t\n}\n\nvec3 taa_evalConstrainColor(vec3 sourceColor, vec2 sourceUV, vec2 sourceVel, vec3 candidateColor) {\n mat3 colorMinMaxAvg;\n\n colorMinMaxAvg = taa_evalNeighbourColorVariance(sourceColor, sourceUV, sourceVel);\n \n\t// clamp history to neighbourhood of current sample\n return taa_clampColor(colorMinMaxAvg[0], colorMinMaxAvg[1], sourceColor, candidateColor);\n}\n\nvec3 taa_evalFeedbackColor(vec3 sourceColor, vec3 historyColor, float blendFactor) {\n const float _FeedbackMin = 0.1;\n const float _FeedbackMax = 0.9;\n\t// feedback weight from unbiased luminance diff (t.lottes)\n\t#if USE_YCOCG\n\t\tfloat lum0 = sourceColor.r;\n\t\tfloat lum1 = historyColor.r;\n\t#else\n\t\tfloat lum0 = Luminance(sourceColor.rgb);\n\t\tfloat lum1 = Luminance(historyColor.rgb);\n\t#endif\n\tfloat unbiased_diff = abs(lum0 - lum1) / max(lum0, max(lum1, 0.2));\n\tfloat unbiased_weight = 1.0 - unbiased_diff;\n\tfloat unbiased_weight_sqr = unbiased_weight * unbiased_weight;\n\tfloat k_feedback = mix(_FeedbackMin, _FeedbackMax, unbiased_weight_sqr);\n\n \n vec3 nextColor = mix(historyColor, sourceColor, k_feedback * blendFactor).xyz;\n return nextColor;\n}\n\n\nvec3 colorWheel(float normalizedHue) {\n float v = normalizedHue * 6.f;\n if (v < 0.f) {\n return vec3(1.f, 0.f, 0.f);\n } else if (v < 1.f) {\n return vec3(1.f, v, 0.f);\n } else if (v < 2.f) {\n return vec3(1.f - (v-1.f), 1.f, 0.f);\n } else if (v < 3.f) {\n return vec3(0.f, 1.f, (v-2.f));\n } else if (v < 4.f) {\n return vec3(0.f, 1.f - (v-3.f), 1.f );\n } else if (v < 5.f) {\n return 
vec3((v-4.f), 0.f, 1.f );\n } else if (v < 6.f) {\n return vec3(1.f, 0.f, 1.f - (v-5.f));\n } else {\n return vec3(1.f, 0.f, 0.f);\n }\n}\n\nvec3 colorRamp(float normalizedHue) {\n float v = normalizedHue * 5.f;\n if (v < 0.f) {\n return vec3(1.f, 0.f, 0.f);\n } else if (v < 1.f) {\n return vec3(1.f, v, 0.f);\n\n\n } else if (v < 2.f) {\n return vec3(1.f - (v - 1.f), 1.f, 0.f);\n } else if (v < 3.f) {\n return vec3(0.f, 1.f, (v - 2.f));\n } else if (v < 4.f) {\n return vec3(0.f, 1.f - (v - 3.f), 1.f);\n } else if (v < 5.f) {\n return vec3((v - 4.f), 0.f, 1.f);\n } else {\n return vec3(1.f, 0.f, 1.f);\n }\n}\n\n\nvec3 taa_getVelocityColorRelative(float velocityPixLength) {\n return colorRamp(velocityPixLength/params.debugShowVelocityThreshold);\n}\n\nvec3 taa_getVelocityColorAboveThreshold(float velocityPixLength) {\n return colorRamp((velocityPixLength - params.debugShowVelocityThreshold)/params.debugShowVelocityThreshold);\n}\n\n\nvec3 taa_evalFXAA(vec2 fragUV) {\n\n // vec2 texelSize = getInvWidthHeight();\n vec2 texelSize = taa_getTexelSize();\n\n // filter width limit for dependent \"two-tap\" texture samples\n float FXAA_SPAN_MAX = 8.0;\n\n // local contrast multiplier for performing AA\n // higher = sharper, but setting this value too high will cause near-vertical and near-horizontal edges to fail\n // see \"fxaaQualityEdgeThreshold\"\n float FXAA_REDUCE_MUL = 1.0 / 8.0;\n\n // luminance threshold for processing dark colors\n // see \"fxaaQualityEdgeThresholdMin\"\n float FXAA_REDUCE_MIN = 1.0 / 128.0;\n\n // fetch raw RGB values for nearby locations\n // sampling pattern is \"five on a die\" (each diagonal direction and the center)\n // computing the coordinates for these texture reads could be moved to the vertex shader for speed if needed\n vec3 rgbNW = texture(sourceMap, fragUV + (vec2(-1.0, -1.0) * texelSize)).xyz;\n vec3 rgbNE = texture(sourceMap, fragUV + (vec2(+1.0, -1.0) * texelSize)).xyz;\n vec3 rgbSW = texture(sourceMap, fragUV + (vec2(-1.0, +1.0) * texelSize)).xyz;\n vec3 rgbSE = texture(sourceMap, fragUV + (vec2(+1.0, +1.0) * texelSize)).xyz;\n vec3 rgbM = texture(sourceMap, fragUV).xyz;\n\t\n // convert RGB values to luminance\n vec3 luma = vec3(0.299, 0.587, 0.114);\n float lumaNW = dot(rgbNW, luma);\n float lumaNE = dot(rgbNE, luma);\n float lumaSW = dot(rgbSW, luma);\n float lumaSE = dot(rgbSE, luma);\n float lumaM = dot( rgbM, luma);\n\t\n // luma range of local neighborhood\n float lumaMin = min(lumaM, min(min(lumaNW, lumaNE), min(lumaSW, lumaSE)));\n float lumaMax = max(lumaM, max(max(lumaNW, lumaNE), max(lumaSW, lumaSE)));\n\t\n // direction perpendicular to local luma gradient\n vec2 dir;\n dir.x = -((lumaNW + lumaNE) - (lumaSW + lumaSE));\n dir.y = ((lumaNW + lumaSW) - (lumaNE + lumaSE));\n\n // compute clamped direction offset for additional \"two-tap\" samples\n // longer vector = blurry, shorter vector = sharp\n float dirReduce = max((lumaNW + lumaNE + lumaSW + lumaSE) * (0.25 * FXAA_REDUCE_MUL), FXAA_REDUCE_MIN);\n float rcpDirMin = 1.0 / (min(abs(dir.x), abs(dir.y)) + dirReduce);\n dir = min(vec2(FXAA_SPAN_MAX, FXAA_SPAN_MAX), \n max(vec2(-FXAA_SPAN_MAX, -FXAA_SPAN_MAX), dir * rcpDirMin)) * texelSize;\n\t\t\n // perform additional texture sampling perpendicular to gradient\n vec3 rgbA = (1.0 / 2.0) * (\n texture(sourceMap, fragUV + dir * (1.0 / 3.0 - 0.5)).xyz +\n texture(sourceMap, fragUV + dir * (2.0 / 3.0 - 0.5)).xyz);\n vec3 rgbB = rgbA * (1.0 / 2.0) + (1.0 / 4.0) * (\n texture(sourceMap, fragUV + dir * (0.0 / 3.0 - 0.5)).xyz +\n texture(sourceMap, 
fragUV + dir * (3.0 / 3.0 - 0.5)).xyz);\n float lumaB = dot(rgbB, luma);\n\n // compare luma of new samples to the luma range of the original neighborhood\n // if the new samples exceed this range, just use the first two samples instead of all four\n if (lumaB < lumaMin || lumaB > lumaMax) {\n return rgbA;\n } else {\n return rgbB;\n }\n}in vec2 varTexCoord0;\nlayout(location = 0) out vec4 outFragColor;\n\nvoid main() {\n vec2 fragUV = varTexCoord0;\n\n // Debug region before debug or fxaa region X\n float distToRegionFXAA = fragUV.x - taa_getRegionFXAA().x; \n if (distToRegionFXAA > 0.0) {\n outFragColor = vec4(taa_evalFXAA(fragUV), 1.0);\n return;\n }\n\n vec2 fragVel = taa_fetchVelocityMapBest(fragUV).xy;\n\n vec3 sourceColor;\n vec3 historyColor;\n vec2 prevFragUV = taa_fetchSourceAndHistory(fragUV, fragVel, sourceColor, historyColor);\n\n vec3 nextColor = sourceColor;\n \n if (taa_constrainColor()) {\n // clamp history to neighbourhood of current sample\n historyColor = taa_evalConstrainColor(sourceColor, fragUV, fragVel, historyColor);\n }\n \n if (taa_feedbackColor()) {\n nextColor = taa_evalFeedbackColor(sourceColor, historyColor, params.blend);\n } else {\n nextColor = mix(historyColor, sourceColor, params.blend);\n }\n\n outFragColor = vec4(taa_resolveColor(nextColor), 1.0);\n}\n\n\n"
+ },
+ "VTnPMaIDQTsMllfsMr5Tew==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// deferred_light.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/16/16.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec2 _texCoord0;\n\nuniform vec4 texcoordFrameTransform;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n _texCoord0 = (pos.xy + 1.0) * 0.5;\n\n _texCoord0 *= texcoordFrameTransform.zw;\n _texCoord0 += texcoordFrameTransform.xy;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// directional_skybox_light_shadow.frag\n// fragment shader\n//\n// Created by Zach Pomerantz on 1/18/2016.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//!>\n\n// glsl / C++ compatible source as interface for Shadows\n#ifdef __cplusplus\n# define MAT4 glm::mat4\n#else\n# define MAT4 mat4\n#endif\n\n#define SHADOW_CASCADE_MAX_COUNT 4\n\nstruct ShadowTransform {\n\tMAT4 reprojection;\n\tfloat fixedBias;\n float slopeBias;\n float _padding1;\n float _padding2;\n};\n\nstruct ShadowParameters {\n ShadowTransform cascades[SHADOW_CASCADE_MAX_COUNT];\n int cascadeCount;\n float invMapSize;\n float invCascadeBlendWidth;\n float maxDistance;\n float invFalloffDistance;\n};\n\n// //\nlayout(std140) uniform shadowTransformBuffer {\n\tShadowParameters shadow;\n};\n\nint getShadowCascadeCount() {\n return shadow.cascadeCount;\n}\n\nfloat getShadowCascadeInvBlendWidth() {\n return shadow.invCascadeBlendWidth;\n}\n\nfloat evalShadowFalloff(float depth) {\n return clamp((shadow.maxDistance-depth) * shadow.invFalloffDistance, 0.0, 1.0);\n}\n\nmat4 getShadowReprojection(int cascadeIndex) {\n\treturn shadow.cascades[cascadeIndex].reprojection;\n}\n\nfloat getShadowScale() {\n\treturn shadow.invMapSize;\n}\n\nfloat getShadowFixedBias(int cascadeIndex) {\n\treturn shadow.cascades[cascadeIndex].fixedBias;\n}\n\nfloat getShadowSlopeBias(int cascadeIndex) {\n\treturn shadow.cascades[cascadeIndex].slopeBias;\n}\n\n\n// Compute the texture coordinates from world coordinates\nvec4 evalShadowTexcoord(int cascadeIndex, vec4 position) {\n\tvec4 shadowCoord = getShadowReprojection(cascadeIndex) * position;\n\treturn vec4(shadowCoord.xyz, 1.0);\n}\n\nbool isShadowCascadeProjectedOnPixel(vec4 cascadeTexCoords) {\n bvec2 greaterThanZero = greaterThan(cascadeTexCoords.xy, vec2(0));\n bvec2 lessThanOne = lessThan(cascadeTexCoords.xy, vec2(1));\n return all(greaterThanZero) && all(lessThanOne);\n}\n\nint getFirstShadowCascadeOnPixel(int startCascadeIndex, vec4 worldPosition, out vec4 cascadeShadowCoords) {\n int cascadeIndex;\n startCascadeIndex = min(startCascadeIndex, getShadowCascadeCount()-1);\n for (cascadeIndex=startCascadeIndex ; cascadeIndex> 1;\n#endif\n\n // Offset for efficient PCF, see http://http.developer.nvidia.com/GPUGems/gpugems_ch11.html\n ivec2 offset = coords & ivec2(1,1);\n offset.y = (offset.x+offset.y) & 1;\n\n offsets.points[0] = shadowScale * vec3(offset + PCFkernel[0], 0.0);\n offsets.points[1] = shadowScale * vec3(offset + 
PCFkernel[1], 0.0);\n offsets.points[2] = shadowScale * vec3(offset + PCFkernel[2], 0.0);\n offsets.points[3] = shadowScale * vec3(offset + PCFkernel[3], 0.0);\n\n return offsets;\n}\n\nfloat evalShadowAttenuationPCF(int cascadeIndex, ShadowSampleOffsets offsets, vec4 shadowTexcoord, float bias) {\n shadowTexcoord.z -= bias;\n float shadowAttenuation = 0.25 * (\n fetchShadow(cascadeIndex, shadowTexcoord.xyz + offsets.points[0]) +\n fetchShadow(cascadeIndex, shadowTexcoord.xyz + offsets.points[1]) +\n fetchShadow(cascadeIndex, shadowTexcoord.xyz + offsets.points[2]) +\n fetchShadow(cascadeIndex, shadowTexcoord.xyz + offsets.points[3])\n );\n return shadowAttenuation;\n}\n\nfloat evalShadowCascadeAttenuation(int cascadeIndex, ShadowSampleOffsets offsets, vec4 shadowTexcoord, float oneMinusNdotL) {\n float bias = getShadowFixedBias(cascadeIndex) + getShadowSlopeBias(cascadeIndex) * oneMinusNdotL;\n return evalShadowAttenuationPCF(cascadeIndex, offsets, shadowTexcoord, bias);\n}\n\nfloat evalShadowAttenuation(vec3 worldLightDir, vec4 worldPosition, float viewDepth, vec3 worldNormal) {\n ShadowSampleOffsets offsets = evalShadowFilterOffsets(worldPosition);\n vec4 cascadeShadowCoords[2];\n cascadeShadowCoords[0] = vec4(0);\n cascadeShadowCoords[1] = vec4(0);\n ivec2 cascadeIndices;\n float cascadeMix = determineShadowCascadesOnPixel(worldPosition, viewDepth, cascadeShadowCoords, cascadeIndices);\n\t\n // Adjust bias if we are at a grazing angle with light\n float oneMinusNdotL = 1.0 - clamp(dot(worldLightDir, worldNormal), 0, 1);\n vec2 cascadeAttenuations = vec2(1.0, 1.0);\n cascadeAttenuations.x = evalShadowCascadeAttenuation(cascadeIndices.x, offsets, cascadeShadowCoords[0], oneMinusNdotL);\n if (cascadeMix > 0.0 && cascadeIndices.y < getShadowCascadeCount()) {\n cascadeAttenuations.y = evalShadowCascadeAttenuation(cascadeIndices.y, offsets, cascadeShadowCoords[1], oneMinusNdotL);\n }\n float attenuation = mix(cascadeAttenuations.x, cascadeAttenuations.y, cascadeMix);\n // Falloff to max distance\n return mix(1.0, attenuation, evalShadowFalloff(viewDepth));\n}\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? 
((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n\n\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\n// the albedo texture\nuniform sampler2D albedoMap;\n\n// the normal texture\nuniform sampler2D normalMap;\n\n// the specular texture\nuniform sampler2D specularMap;\n\n// the depth texture\nuniform sampler2D depthMap;\nuniform sampler2D linearZeyeMap;\n\n// the obscurance texture\nuniform sampler2D obscuranceMap;\n\n// the lighting texture\nuniform sampler2D lightingMap;\n\n\nstruct DeferredFragment {\n vec4 position;\n vec3 normal;\n float metallic;\n vec3 albedo;\n 
float obscurance;\n vec3 fresnel;\n float roughness;\n int mode;\n float scattering;\n float depthVal;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nDeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n vec4 specularVal;\n \n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n specularVal = texture(specularMap, texcoord);\n frag.obscurance = texture(obscuranceMap, texcoord).x;\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n frag.obscurance = min(specularVal.w, frag.obscurance);\n\n if (frag.mode == FRAG_MODE_SCATTERING) {\n frag.scattering = specularVal.x;\n }\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nDeferredFragment unpackDeferredFragmentNoPositionNoAmbient(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n\n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n //frag.emissive = specularVal.xyz;\n frag.obscurance = 1.0;\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 _prevView;\n mat4 _prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// 
positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\nvec4 unpackDeferredPosition(float depthValue, vec2 texcoord) {\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n\n return vec4(evalEyePositionFromZdb(side, depthValue, texcoord), 1.0);\n}\n\n// This method to unpack position is fastesst\nvec4 unpackDeferredPositionFromZdb(vec2 texcoord) {\n float Zdb = texture(depthMap, texcoord).x;\n\treturn unpackDeferredPosition(Zdb, texcoord);\n}\n\nvec4 unpackDeferredPositionFromZeye(vec2 texcoord) {\n float Zeye = -texture(linearZeyeMap, texcoord).x;\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n 
texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0);\n}\n\nDeferredFragment unpackDeferredFragment(DeferredFrameTransform deferredTransform, vec2 texcoord) {\n\n float depthValue = texture(depthMap, texcoord).r;\n\n DeferredFragment frag = unpackDeferredFragmentNoPosition(texcoord);\n\n frag.depthVal = depthValue;\n frag.position = unpackDeferredPosition(frag.depthVal, texcoord);\n\n return frag;\n}\n\n\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\n\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 
attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; 
}\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base 
* base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\n\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the 
accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\n\n\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend 
is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 evalLightmappedColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 normal, vec3 albedo, vec3 lightmap) {\n Light light = getKeyLight();\n LightAmbient ambient = getLightAmbient();\n\n // Catch normals perpendicular to the projection plane, hence the magic number for the threshold\n // It should be just 0, but we have inaccuracy so we 
overshoot\n const float PERPENDICULAR_THRESHOLD = -0.005;\n vec3 fragNormal = vec3(invViewMat * vec4(normal, 0.0)); // transform to worldspace\n float diffuseDot = dot(fragNormal, -getLightDirection(light));\n float facingLight = step(PERPENDICULAR_THRESHOLD, diffuseDot); \n\n // Reevaluate the shadow attenuation for light facing fragments\n float lightAttenuation = (1.0 - facingLight) + facingLight * shadowAttenuation;\n\n // Diffuse light is the lightmap dimmed by shadow\n vec3 diffuseLight = lightAttenuation * lightmap;\n\n // Ambient light is the lightmap when in shadow\n vec3 ambientLight = (1.0 - lightAttenuation) * lightmap * getLightAmbientIntensity(ambient);\n\n return isLightmapEnabled() * obscurance * albedo * (diffuseLight + ambientLight);\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n return specularLight;\n}\n\n\nfloat curvatureAO(in float k) {\n return 1.0f - (0.0022f * k * k) + (0.0776f * k) + 0.7369f;\n}\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientSpaceLowNormal = (ambient.transform * vec4(lowNormalCurvature.xyz, 0.0)).xyz;\n\n\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nif (scattering * isScatteringEnabled() > 0.0) {\n float ambientOcclusion = curvatureAO(lowNormalCurvature.w * 20.0f) * 0.5f;\n float ambientOcclusionHF = curvatureAO(midNormalCurvature.w * 8.0f) * 0.5f;\n ambientOcclusion = min(ambientOcclusion, ambientOcclusionHF);\n\n obscurance = min(obscurance, ambientOcclusion);\n\n // Diffuse from ambient\n diffuse = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceLowNormal).xyz;\n\n // Scattering ambient specular is the same as non scattering for now\n // TODO: we should use the same 
specular answer as for direct lighting\n }\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\n// the curvature texture\nuniform sampler2D curvatureMap;\n\nvec4 fetchCurvature(vec2 texcoord) {\n return texture(curvatureMap, texcoord);\n}\n\n// the curvature texture\nuniform sampler2D diffusedCurvatureMap;\n\nvec4 fetchDiffusedCurvature(vec2 texcoord) {\n return texture(diffusedCurvatureMap, texcoord);\n}\n\nvoid unpackMidLowNormalCurvature(vec2 texcoord, out vec4 midNormalCurvature, out vec4 lowNormalCurvature) {\n midNormalCurvature = fetchCurvature(texcoord);\n lowNormalCurvature = fetchDiffusedCurvature(texcoord);\n midNormalCurvature.xyz = normalize((midNormalCurvature.xyz - 0.5f) * 2.0f);\n lowNormalCurvature.xyz = normalize((lowNormalCurvature.xyz - 0.5f) * 2.0f);\n midNormalCurvature.w = (midNormalCurvature.w * 2.0 - 1.0);\n lowNormalCurvature.w = (lowNormalCurvature.w * 2.0 - 1.0);\n}\n\nvec3 evalSkyboxGlobalColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal,\n vec3 albedo, vec3 fresnel, float metallic, float roughness\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normal);\n vec3 fragPositionWS = vec3(invViewMat * vec4(position, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance\n,scattering, midNormalCurvature, lowNormalCurvature \n);\n color += ambientDiffuse;\n color += ambientSpecular;\n\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n color += directionalDiffuse;\n color += directionalSpecular;\n\n // Attenuate the light if haze effect selected\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_KEYLIGHT_ATTENUATED) == HAZE_MODE_IS_KEYLIGHT_ATTENUATED) {\n color = computeHazeColorKeyLightAttenuation(color, 
lightDirection, fragPositionWS); \n }\n\n return color;\n}\n\n\n\nlayout(location = 0) in vec2 _texCoord0;\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n DeferredFrameTransform deferredTransform = getDeferredFrameTransform();\n DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0);\n\n vec4 viewPos = vec4(frag.position.xyz, 1.0);\n vec4 worldPos = getViewInverse() * viewPos;\n Light shadowLight = getKeyLight();\n vec3 worldLightDirection = getLightDirection(shadowLight);\n float shadowAttenuation = evalShadowAttenuation(worldLightDirection, worldPos, -viewPos.z, frag.normal);\n\n // Light mapped or not ?\n if (frag.mode == FRAG_MODE_UNLIT) {\n discard;\n } else if (frag.mode == FRAG_MODE_LIGHTMAPPED) {\n discard;\n } else {\n vec4 midNormalCurvature = vec4(0);\n vec4 lowNormalCurvature = vec4(0);\n if (frag.mode == FRAG_MODE_SCATTERING) {\n unpackMidLowNormalCurvature(_texCoord0, midNormalCurvature, lowNormalCurvature);\n }\n vec3 color = evalSkyboxGlobalColor(\n getViewInverse(),\n shadowAttenuation,\n frag.obscurance,\n frag.position.xyz,\n frag.normal,\n frag.albedo,\n frag.fresnel,\n frag.metallic,\n frag.roughness,\n frag.scattering,\n midNormalCurvature,\n lowNormalCurvature);\n\n\n _fragColor = vec4(color, 1.0);\n }\n}\n\n\n"
+ },
+ "VW6Abw5AzhHt52D7YLE4Ww==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_normal_map.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/29/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid 
skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _tangentWS;\nout vec3 _color;\nout float _alpha;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n skinPositionNormalTangent(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, inTangent.xyz, position, interpolatedNormal.xyz, interpolatedTangent.xyz);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n interpolatedNormal = vec4(normalize(interpolatedNormal.xyz), 0.0);\n interpolatedTangent = vec4(normalize(interpolatedTangent.xyz), 0.0);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n\n\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedNormal.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedTangent.xyz = 
vec3(dot(mr0, interpolatedTangent.xyz), dot(mr1, interpolatedTangent.xyz), dot(mr2, interpolatedTangent.xyz));\n }\n\n\n _normalWS = interpolatedNormal.xyz;\n _tangentWS = interpolatedTangent.xyz;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_normal_map.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 5/6/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat 
packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = 
normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#define scatteringMap 6\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(tableTex(matTex, scatteringMap), uv).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return tableTexValue(matTex, scatteringMap, uv).r; // boolean scattering for now\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\nuniform sampler2D scatteringMap;\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(scatteringMap, uv, TAA_TEXTURE_LOD_BIAS).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return texture(scatteringMap, uv).r; // boolean scattering for now\n}\n#endif\n\n\n\nlayout(location = 0) in vec4 _positionES;\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 2) in vec2 _texCoord1;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _tangentWS;\nlayout(location = 5) in vec3 _color;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTex = (((matKey & NORMAL_MAP_BIT) != 0) ? 
fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\nfloat scatteringTex = (((matKey & SCATTERING_MAP_BIT) != 0) ? fetchScatteringMap(_texCoord0) : 0.0);\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragNormalWS;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTex, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormalWS = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n {\n scattering = (((matKey & SCATTERING_MAP_BIT) != 0) ? scatteringTex : scattering);\n}\n;\n\n packDeferredFragment(\n normalize(fragNormalWS.xyz),\n opacity,\n albedo,\n roughness,\n metallic,\n emissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "VezxukUuV2VczJFSW6hvGg==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/14/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec2 _texCoord0;\nlayout(location = 2) out vec2 _texCoord1;\nlayout(location = 3) out vec3 _normalWS;\nlayout(location = 4) out vec3 _color;\nlayout(location = 5) out float _alpha;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n 
\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n// overlay3D_model_transparent.frag\n//\n// Created by Sam Gateau on 2/27/2017.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, 
cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n 
C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n\n\n // These values will be set when we know the light direction, in 
updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n\n\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return 
specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n\n\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position 
in world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 
0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return 
tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\nlayout(location = 0) in vec4 _positionES;\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 2) in vec2 _texCoord1;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _color;\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragPosition = _positionES.xyz;\n TransformCamera cam = getTransformCamera();\n\n vec4 color = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPosition,\n normalize(_normalWS),\n albedo,\n fresnel,\n metallic,\n emissive,\n roughness, opacity),\n opacity);\n\n // Apply standard tone mapping\n _fragColor = vec4(pow(color.xyz, vec3(1.0 / 2.2)), color.w);\n}\n\n"
+ },
+ "W+HP2v/b3aXW0UJd68rbWw==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid 
skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec3 interpolatedNormal = vec3(0.0, 0.0, 0.0);\n\n skinPositionNormal(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, position, interpolatedNormal);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n\n\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n// model_translucent_fade.frag\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE\n\n// The material values (at least the material key) must be precisely 
bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool 
lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// 
libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n\n\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous 
metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return 
specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in 
world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\nuniform lightBuffer {\n Light lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = 
vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return 
frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? 
-eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? 
(element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n\n\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float 
radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 
_textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\n\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec4 _positionWS;\nin vec3 _normalWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = 
fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragPositionES = _positionES.xyz;\n vec3 fragPositionWS = _positionWS.xyz;\n // Lighting is done in world space\n vec3 fragNormalWS = normalize(_normalWS);\n\n TransformCamera cam = getTransformCamera();\n vec3 fragToEyeWS = cam._viewInverse[3].xyz - fragPositionWS;\n vec3 fragToEyeDirWS = normalize(fragToEyeWS);\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragToEyeDirWS);\n\n vec4 localLighting = vec4(0.0);\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(_positionWS);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (hasLocalLights(numLights, clusterPos, dims)) {\n localLighting = evalLocalLighting(cluster, numLights, fragPositionWS, surfaceWS,\n metallic, fresnel, albedo, 0.0,\n vec4(0), vec4(0), opacity);\n }\n\n _fragColor = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPositionES,\n fragPositionWS,\n albedo,\n fresnel,\n metallic,\n emissive + fadeEmissive,\n surfaceWS, opacity, localLighting.rgb),\n opacity);\n}\n\n\n"
+ },
+ "W6lqpCKxUv9XryZ58YFYKw==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// simple_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/04/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\n// Generated on Wed May 23 14:24:07 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nout vec4 _fadeData1;\nout vec4 _fadeData2;\nout vec4 _fadeData3;\n\n\n// the interpolated normal\nout vec3 _normalWS;\nout vec3 _normalMS;\nout vec4 _color;\nout vec2 _texCoord0;\nout vec4 _positionMS;\nout vec4 _positionES;\nout vec4 _positionWS;\n\nvoid main(void) {\n _color = color_sRGBAToLinear(inColor);\n _texCoord0 = inTexCoord0.st;\n _positionMS = inPosition;\n _normalMS = inNormal.xyz;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = 
uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n _fadeData1 = inTexCoord2;\n _fadeData2 = inTexCoord3;\n _fadeData3 = inTexCoord4; \n\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// simple_transparent_textured_unlit_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n// the albedo texture\nuniform sampler2D originalTexture;\n\nin vec4 _color;\nin vec2 _texCoord0;\nin vec4 _positionWS;\n\nlayout(location = 0) out vec4 _fragColor0;\n\n// Declare after all samplers to prevent sampler location mix up with originalTexture\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n 
vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nin vec4 _fadeData1;\nin vec4 _fadeData2;\nin vec4 _fadeData3;\n\n\n\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = int(_fadeData1.w);\n fadeParams.threshold = 
_fadeData2.w;\n fadeParams.noiseOffset = _fadeData1.xyz;\n fadeParams.baseOffset = _fadeData2.xyz;\n fadeParams.baseInvSize = _fadeData3.xyz;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n vec4 texel = texture(originalTexture, _texCoord0.st);\n float colorAlpha = _color.a;\n if (_color.a <= 0.0) {\n texel = color_sRGBAToLinear(texel);\n colorAlpha = -_color.a;\n }\n _fragColor0 = vec4(_color.rgb * texel.rgb + fadeEmissive, colorAlpha * texel.a);\n}\n\n"
+ },
+ "WaR4mMRDZuwrtZeKHhYcYQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// standardTransformPNTC.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/10/2015.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nout vec3 varPosition;\nout vec3 varNormal;\nout vec2 varTexCoord0;\nout vec4 varColor;\n\nvoid main(void) {\n varTexCoord0 = inTexCoord0.st;\n varColor = color_sRGBAToLinear(inColor);\n \n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n varNormal = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n 
varPosition = inPosition.xyz;\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawTextureOpaque.frag\n//\n// Draw texture 0 fetched at texcoord.xy\n// Alpha is 1\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\nuniform sampler2D colorMap;\n\nlayout(location = 0) in vec2 varTexCoord0;\nlayout(location = 0) out vec4 outFragColor;\n\nvoid main(void) {\n outFragColor = vec4(texture(colorMap, varTexCoord0).xyz, 1.0);\n}\n\n\n"
+ },
+ "WbmSQgBwTA31eDChQObMLQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/14/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec2 _texCoord0;\nlayout(location = 2) out vec2 _texCoord1;\nlayout(location = 3) out vec3 _normalWS;\nlayout(location = 4) out vec3 _color;\nlayout(location = 5) out float _alpha;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n 
\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n// overlay3D_model_transparent.frag\n//\n// Created by Sam Gateau on 2/27/2017.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, 
cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n 
C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n\n\n // These values will be set when we know the light direction, in 
updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n\n\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return 
specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n\n\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position 
in world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 
0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return 
tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\nlayout(location = 0) in vec4 _positionES;\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 2) in vec2 _texCoord1;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _color;\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragPosition = _positionES.xyz;\n TransformCamera cam = getTransformCamera();\n\n vec4 color = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPosition,\n normalize(_normalWS),\n albedo,\n fresnel,\n metallic,\n emissive,\n roughness, opacity),\n opacity);\n\n // Apply standard tone mapping\n _fragColor = vec4(pow(color.xyz, vec3(1.0 / 2.2)), color.w);\n}\n\n"
+ },
+ "XK3QC6wS+A9lXJNuiydI8Q==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// overlay3D.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/16/15.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nout vec3 _color;\nout float _alpha;\nout vec2 _texCoord0;\nout vec4 _positionES;\nout vec3 _normalWS;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n _texCoord0 = inTexCoord0.st;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, 
inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n// overlay3D.frag\n// fragment shader\n//\n// Created by Sam Gateau on 6/16/15.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat 
lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat 
getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n\n\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = 
dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n\n\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= 
isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\nvec4 evalGlobalColor(float shadowAttenuation, vec3 position, vec3 normal, vec3 albedo, float metallic, vec3 fresnel, float roughness, float opacity) {\n\n // Need the light now\n Light light = getKeyLight();\n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n LightAmbient ambient = getLightAmbient();\n\n TransformCamera cam = getTransformCamera();\n vec3 fragEyeVectorView = normalize(-position);\n vec3 fragEyeDir;\n { // transformEyeToWorldDir\n fragEyeDir = vec3(cam._viewInverse * vec4(fragEyeVectorView.xyz, 0.0));\n }\n\n\n SurfaceData surface = initSurfaceData(roughness, normal, fragEyeDir);\n\n vec3 color = opacity * albedo * getLightColor(light) * getLightAmbientIntensity(ambient);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, 
fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse * isDiffuseEnabled() * isDirectionalEnabled();\n color += directionalSpecular * isSpecularEnabled() * isDirectionalEnabled();\n\n return vec4(color, opacity);\n}\n\nuniform sampler2D originalTexture;\n\nin vec2 _texCoord0;\nin vec4 _positionES;\nin vec3 _normalWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec4 albedo = texture(originalTexture, _texCoord0);\n\n vec3 fragPosition = _positionES.xyz;\n vec3 fragNormal = normalize(_normalWS);\n vec3 fragAlbedo = albedo.rgb * _color;\n float fragMetallic = 0.0;\n vec3 fragSpecular = vec3(0.1);\n float fragRoughness = 0.9;\n float fragOpacity = albedo.a;\n\n if (fragOpacity <= 0.1) {\n discard;\n }\n\n vec4 color = evalGlobalColor(1.0,\n fragPosition,\n fragNormal,\n fragAlbedo,\n fragMetallic,\n fragSpecular,\n fragRoughness,\n fragOpacity);\n\n\n // Apply standard tone mapping\n _fragColor = vec4(pow(color.xyz, vec3(1.0 / 2.2)), color.w);\n}\n\n\n"
+ },
+ "YRhR/TtZ3TB+GLUSmpfmXw==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model_translucent.vert\n// vertex shader\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout float _alpha;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec4 _positionES;\nout vec4 _positionWS;\nout vec3 _normalWS;\nout vec3 _color;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= 
vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n// model_translucent_fade.frag\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return 
bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return 
attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n\n\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return 
lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV 
* smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return 
specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in 
world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\nuniform lightBuffer {\n Light lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = 
vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return 
frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? 
-eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? 
(element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n\n\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float 
radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 
_textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\n\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec4 _positionWS;\nin vec3 _normalWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = 
fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragPositionES = _positionES.xyz;\n vec3 fragPositionWS = _positionWS.xyz;\n // Lighting is done in world space\n vec3 fragNormalWS = normalize(_normalWS);\n\n TransformCamera cam = getTransformCamera();\n vec3 fragToEyeWS = cam._viewInverse[3].xyz - fragPositionWS;\n vec3 fragToEyeDirWS = normalize(fragToEyeWS);\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragToEyeDirWS);\n\n vec4 localLighting = vec4(0.0);\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(_positionWS);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (hasLocalLights(numLights, clusterPos, dims)) {\n localLighting = evalLocalLighting(cluster, numLights, fragPositionWS, surfaceWS,\n metallic, fresnel, albedo, 0.0,\n vec4(0), vec4(0), opacity);\n }\n\n _fragColor = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPositionES,\n fragPositionWS,\n albedo,\n fresnel,\n metallic,\n emissive + fadeEmissive,\n surfaceWS, opacity, localLighting.rgb),\n opacity);\n}\n\n\n"
+ },
+ "ZDrgpFTPVfPS6hdGXRwS4g==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/14/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid skinPositionNormalTangent(ivec4 
skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _color;\nout float _alpha;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec3 interpolatedNormal = vec3(0.0, 0.0, 0.0);\n\n skinPositionNormal(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, position, interpolatedNormal);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n\n\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_specular_map.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 5/6/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? 
+1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat 
evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n 
surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#define scatteringMap 6\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(tableTex(matTex, scatteringMap), uv).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return tableTexValue(matTex, scatteringMap, uv).r; // boolean scattering for now\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\nuniform sampler2D scatteringMap;\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(scatteringMap, uv, TAA_TEXTURE_LOD_BIAS).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return texture(scatteringMap, uv).r; // boolean scattering for now\n}\n#endif\n\n\n\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 2) in vec2 _texCoord1;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _color;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\nfloat scatteringTex = (((matKey & SCATTERING_MAP_BIT) != 0) ? fetchScatteringMap(_texCoord0) : 0.0);\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? 
step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n {\n if (opacity < 1.0) {\n discard;\n }\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n {\n scattering = (((matKey & SCATTERING_MAP_BIT) != 0) ? scatteringTex : scattering);\n}\n;\n\n packDeferredFragment(\n normalize(_normalWS), \n opacity,\n albedo,\n roughness,\n metallic,\n emissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "ZFdSYNQ583QSXI16YpjAjg==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:23:43 2018\n//\n// blurGaussianDepthAwareV.frag\n//\n// Created by Sam Gateau on 6/7/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:23:43 2018\n//\n// Created by Sam Gateau on 6/7/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:23:43 2018\n//\n// Created by Olivier Prat on 09/25/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n#define BLUR_MAX_NUM_TAPS 33\nstruct BlurParameters {\n vec4 resolutionInfo;\n vec4 texcoordTransform;\n vec4 filterInfo;\n vec4 depthInfo;\n vec4 stereoInfo;\n vec4 linearDepthInfo;\n vec2 taps[BLUR_MAX_NUM_TAPS];\n};\n\nuniform blurParamsBuffer {\n BlurParameters parameters;\n};\n\nvec2 getViewportInvWidthHeight() {\n return parameters.resolutionInfo.zw;\n}\n\nvec2 evalTexcoordTransformed(vec2 texcoord) {\n return (texcoord * parameters.texcoordTransform.zw + parameters.texcoordTransform.xy);\n}\n\nfloat getFilterScale() {\n return parameters.filterInfo.x;\n}\n\nint getFilterNumTaps() {\n return int(parameters.filterInfo.y);\n}\n\nfloat getOutputAlpha() {\n return parameters.filterInfo.z;\n}\n\nvec2 getFilterTap(int index) {\n return parameters.taps[index];\n}\n\nfloat getFilterTapOffset(vec2 tap) {\n return tap.x;\n}\n\nfloat getFilterTapWeight(vec2 tap) {\n return tap.y;\n}\n\nfloat getDepthThreshold() {\n return parameters.depthInfo.x;\n}\n\nfloat getDepthPerspective() {\n return parameters.depthInfo.w;\n}\n\nfloat getPosLinearDepthFar() {\n return parameters.linearDepthInfo.x;\n}\n\n\n\nuniform sampler2D sourceMap;\nuniform sampler2D depthMap;\n\nvec4 pixelShaderGaussianDepthAware(vec2 texcoord, vec2 direction, vec2 pixelStep) {\n texcoord = evalTexcoordTransformed(texcoord);\n float sampleDepth = texture(depthMap, texcoord).x;\n if (sampleDepth >= getPosLinearDepthFar()) {\n discard;\n }\n vec4 sampleCenter = texture(sourceMap, texcoord);\n\n // Calculate the width scale.\n float distanceToProjectionWindow = getDepthPerspective();\n\n float depthThreshold = getDepthThreshold();\n\n // Calculate the final step to fetch the surrounding pixels.\n float filterScale = 
getFilterScale();\n float scale = distanceToProjectionWindow / sampleDepth;\n\n vec2 finalStep = filterScale * scale * direction * pixelStep;\n int numTaps = getFilterNumTaps();\n\n // Accumulate the center sample\n vec2 tapInfo = getFilterTap(0);\n float totalWeight = getFilterTapWeight(tapInfo);\n vec4 srcBlurred = sampleCenter * totalWeight;\n\n for(int i = 1; i < numTaps; i++) {\n tapInfo = getFilterTap(i);\n\n // Fetch color and depth for current sample.\n vec2 sampleCoord = texcoord + (getFilterTapOffset(tapInfo) * finalStep);\n if (all(greaterThanEqual(sampleCoord, vec2(0,0))) && all(lessThanEqual(sampleCoord, vec2(1.0,1.0)))) {\n float srcDepth = texture(depthMap, sampleCoord).x;\n vec4 srcSample = texture(sourceMap, sampleCoord);\n float weight = getFilterTapWeight(tapInfo);\n \n // If the difference in depth is huge, we lerp color back.\n float s = clamp(depthThreshold * distanceToProjectionWindow * filterScale * abs(srcDepth - sampleDepth), 0.0, 1.0);\n srcSample = mix(srcSample, sampleCenter, s);\n\n // Accumulate.\n srcBlurred += srcSample * weight;\n totalWeight += weight;\n }\n } \n \n if (totalWeight>0.0) {\n srcBlurred /= totalWeight;\n }\n return srcBlurred;\n}\n\n\n\nlayout(location = 0) in vec2 varTexCoord0;\n\nlayout(location = 0) out vec4 outFragColor;\n\nvoid main(void) {\n outFragColor = pixelShaderGaussianDepthAware(varTexCoord0, vec2(0.0, 1.0), getViewportInvWidthHeight());\n}\n\n\n\n"
+ },
+ "ZeGPkhGlT/ymBXA0jFFamw==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:23:43 2018\n//\n// blurGaussianDepthAwareH.frag\n//\n// Created by Sam Gateau on 6/7/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:23:43 2018\n//\n// Created by Sam Gateau on 6/7/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:23:43 2018\n//\n// Created by Olivier Prat on 09/25/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n#define BLUR_MAX_NUM_TAPS 33\nstruct BlurParameters {\n vec4 resolutionInfo;\n vec4 texcoordTransform;\n vec4 filterInfo;\n vec4 depthInfo;\n vec4 stereoInfo;\n vec4 linearDepthInfo;\n vec2 taps[BLUR_MAX_NUM_TAPS];\n};\n\nuniform blurParamsBuffer {\n BlurParameters parameters;\n};\n\nvec2 getViewportInvWidthHeight() {\n return parameters.resolutionInfo.zw;\n}\n\nvec2 evalTexcoordTransformed(vec2 texcoord) {\n return (texcoord * parameters.texcoordTransform.zw + parameters.texcoordTransform.xy);\n}\n\nfloat getFilterScale() {\n return parameters.filterInfo.x;\n}\n\nint getFilterNumTaps() {\n return int(parameters.filterInfo.y);\n}\n\nfloat getOutputAlpha() {\n return parameters.filterInfo.z;\n}\n\nvec2 getFilterTap(int index) {\n return parameters.taps[index];\n}\n\nfloat getFilterTapOffset(vec2 tap) {\n return tap.x;\n}\n\nfloat getFilterTapWeight(vec2 tap) {\n return tap.y;\n}\n\nfloat getDepthThreshold() {\n return parameters.depthInfo.x;\n}\n\nfloat getDepthPerspective() {\n return parameters.depthInfo.w;\n}\n\nfloat getPosLinearDepthFar() {\n return parameters.linearDepthInfo.x;\n}\n\n\n\nuniform sampler2D sourceMap;\nuniform sampler2D depthMap;\n\nvec4 pixelShaderGaussianDepthAware(vec2 texcoord, vec2 direction, vec2 pixelStep) {\n texcoord = evalTexcoordTransformed(texcoord);\n float sampleDepth = texture(depthMap, texcoord).x;\n if (sampleDepth >= getPosLinearDepthFar()) {\n discard;\n }\n vec4 sampleCenter = texture(sourceMap, texcoord);\n\n // Calculate the width scale.\n float distanceToProjectionWindow = getDepthPerspective();\n\n float depthThreshold = getDepthThreshold();\n\n // Calculate the final step to fetch the surrounding pixels.\n float filterScale = 
getFilterScale();\n float scale = distanceToProjectionWindow / sampleDepth;\n\n vec2 finalStep = filterScale * scale * direction * pixelStep;\n int numTaps = getFilterNumTaps();\n\n // Accumulate the center sample\n vec2 tapInfo = getFilterTap(0);\n float totalWeight = getFilterTapWeight(tapInfo);\n vec4 srcBlurred = sampleCenter * totalWeight;\n\n for(int i = 1; i < numTaps; i++) {\n tapInfo = getFilterTap(i);\n\n // Fetch color and depth for current sample.\n vec2 sampleCoord = texcoord + (getFilterTapOffset(tapInfo) * finalStep);\n if (all(greaterThanEqual(sampleCoord, vec2(0,0))) && all(lessThanEqual(sampleCoord, vec2(1.0,1.0)))) {\n float srcDepth = texture(depthMap, sampleCoord).x;\n vec4 srcSample = texture(sourceMap, sampleCoord);\n float weight = getFilterTapWeight(tapInfo);\n \n // If the difference in depth is huge, we lerp color back.\n float s = clamp(depthThreshold * distanceToProjectionWindow * filterScale * abs(srcDepth - sampleDepth), 0.0, 1.0);\n srcSample = mix(srcSample, sampleCenter, s);\n\n // Accumulate.\n srcBlurred += srcSample * weight;\n totalWeight += weight;\n }\n } \n \n if (totalWeight>0.0) {\n srcBlurred /= totalWeight;\n }\n return srcBlurred;\n}\n\n\n\nlayout(location = 0) in vec2 varTexCoord0;\n\nlayout(location = 0) out vec4 outFragColor;\n\nvoid main(void) {\n outFragColor = pixelShaderGaussianDepthAware(varTexCoord0, vec2(1.0, 0.0), getViewportInvWidthHeight());\n}\n\n\n\n"
+ },
+ "ZezT7IYThMJrsNlMn12WVw==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// skin_model_shadow_fade_dq.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer 
transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 
4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n}\n\n\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = 
length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n skinPosition(inSkinClusterIndex, inSkinClusterWeight, inPosition, position);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// skin_model_shadow_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/08/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nin vec4 _positionWS;\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n 
applyFadeClip(fadeParams, _positionWS.xyz);\n\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
+ "aDHkDPH8eyY9e3nPkAsVJA==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 04/24/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec4 _positionWS;\nlayout(location = 2) out vec2 _texCoord0;\nlayout(location = 3) out vec2 _texCoord1;\nlayout(location = 4) out vec3 _normalWS;\nlayout(location = 5) out vec3 _color;\nlayout(location = 6) out float _alpha;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = 
vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// model_translucent_unlit_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) 
float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 
metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n\n\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nin vec2 _texCoord0;\nin vec3 _color;\nin float _alpha;\nin vec4 _positionWS;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n 
fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n albedo += fadeEmissive;\n _fragColor = vec4(albedo * isUnlitEnabled(), opacity);\n}\n\n\n"
+ },
+ "ac/KNw/7IqiVu8234N//SQ==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// subsurfaceScattering_makeSpecularBeckmann.frag\n//\n// Created by Sam Gateau on 6/30/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nin vec2 varTexCoord0;\nout vec4 outFragColor;\n\nfloat specularBeckmann(float ndoth, float roughness) {\n float alpha = acos(ndoth);\n float ta = tan(alpha);\n float val = 1.0 / (roughness * roughness * pow(ndoth, 4.0)) * exp(-(ta * ta) / (roughness * roughness));\n return val;\n}\n\nvoid main(void) {\n outFragColor = vec4(vec3(0.5 * pow( specularBeckmann(varTexCoord0.x, varTexCoord0.y), 0.1)), 1.0);\n}\n\n\n"
+ },
+ "aczRujxXj+aRYhb9b147Dg==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model_translucent_normal_map.vert\n// vertex shader\n//\n// Created by Olivier Prat on 23/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout float _alpha;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec4 _positionES;\nout vec4 _positionWS;\nout vec3 _normalWS;\nout vec3 _tangentWS;\nout vec3 _color;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef 
GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _tangentWS = vec3(dot(mr0, inTangent.xyz), dot(mr1, inTangent.xyz), dot(mr2, inTangent.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// model_translucent_normal_map_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 23/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High 
Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, 
float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return 
lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n\n\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See 
https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return 
specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in 
world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\nuniform lightBuffer {\n Light lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = 
vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return 
frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? 
-eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? 
(element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n\n\n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float 
radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 
_textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n\n\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n 
return pow(n, 1.0/2.2); // Remove sRGB. Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec3 _normalWS;\nin vec3 _tangentWS;\nin vec3 _color;\nin float _alpha;\nin vec4 _positionWS;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n 
fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n int matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTex = (((matKey & NORMAL_MAP_BIT) != 0) ? fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragPositionES = _positionES.xyz;\n vec3 fragPositionWS = _positionWS.xyz;\n // Lighting is done in world space\n vec3 fragNormalWS;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTex, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormalWS = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n TransformCamera cam = getTransformCamera();\n vec3 fragToEyeWS = cam._viewInverse[3].xyz - fragPositionWS;\n vec3 fragToEyeDirWS = normalize(fragToEyeWS);\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragToEyeDirWS);\n\n vec4 localLighting = vec4(0.0);\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(_positionWS);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (hasLocalLights(numLights, clusterPos, dims)) {\n localLighting = evalLocalLighting(cluster, numLights, fragPositionWS, surfaceWS,\n metallic, fresnel, albedo, 0.0,\n vec4(0), vec4(0), opacity);\n }\n\n _fragColor = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPositionES,\n\t\tfragPositionWS,\n albedo,\n fresnel,\n metallic,\n emissive + fadeEmissive,\n surfaceWS, opacity, localLighting.rgb),\n 
opacity);\n}\n\n\n"
+ },
+ "bB6y3KjzOm6CAmU3y7PcWQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// deferred_light.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/16/16.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec2 _texCoord0;\n\nuniform vec4 texcoordFrameTransform;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n _texCoord0 = (pos.xy + 1.0) * 0.5;\n\n _texCoord0 *= texcoordFrameTransform.zw;\n _texCoord0 += texcoordFrameTransform.xy;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// directional_ambient_light_shadow.frag\n// fragment shader\n//\n// Created by Zach Pomerantz on 1/18/2016.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for Shadows\n#ifdef __cplusplus\n# define MAT4 glm::mat4\n#else\n# define MAT4 mat4\n#endif\n\n#define SHADOW_CASCADE_MAX_COUNT 4\n\nstruct ShadowTransform {\n\tMAT4 reprojection;\n\tfloat fixedBias;\n float slopeBias;\n float _padding1;\n float _padding2;\n};\n\nstruct ShadowParameters {\n ShadowTransform cascades[SHADOW_CASCADE_MAX_COUNT];\n int cascadeCount;\n float invMapSize;\n float invCascadeBlendWidth;\n float maxDistance;\n float invFalloffDistance;\n};\n\n// //\nlayout(std140) uniform shadowTransformBuffer {\n\tShadowParameters shadow;\n};\n\nint getShadowCascadeCount() {\n return shadow.cascadeCount;\n}\n\nfloat getShadowCascadeInvBlendWidth() {\n return shadow.invCascadeBlendWidth;\n}\n\nfloat evalShadowFalloff(float depth) {\n return clamp((shadow.maxDistance-depth) * shadow.invFalloffDistance, 0.0, 1.0);\n}\n\nmat4 getShadowReprojection(int cascadeIndex) {\n\treturn shadow.cascades[cascadeIndex].reprojection;\n}\n\nfloat getShadowScale() {\n\treturn shadow.invMapSize;\n}\n\nfloat getShadowFixedBias(int cascadeIndex) {\n\treturn shadow.cascades[cascadeIndex].fixedBias;\n}\n\nfloat getShadowSlopeBias(int cascadeIndex) {\n\treturn shadow.cascades[cascadeIndex].slopeBias;\n}\n\n\n// Compute the texture coordinates from world coordinates\nvec4 evalShadowTexcoord(int cascadeIndex, vec4 position) {\n\tvec4 shadowCoord = getShadowReprojection(cascadeIndex) * position;\n\treturn vec4(shadowCoord.xyz, 1.0);\n}\n\nbool isShadowCascadeProjectedOnPixel(vec4 cascadeTexCoords) {\n bvec2 greaterThanZero = greaterThan(cascadeTexCoords.xy, vec2(0));\n bvec2 lessThanOne = lessThan(cascadeTexCoords.xy, vec2(1));\n return all(greaterThanZero) && all(lessThanOne);\n}\n\nint getFirstShadowCascadeOnPixel(int startCascadeIndex, vec4 worldPosition, out vec4 cascadeShadowCoords) {\n int cascadeIndex;\n startCascadeIndex = min(startCascadeIndex, getShadowCascadeCount()-1);\n for (cascadeIndex=startCascadeIndex ; cascadeIndex> 1;\n#endif\n\n // Offset for efficient PCF, see http://http.developer.nvidia.com/GPUGems/gpugems_ch11.html\n ivec2 offset = coords & ivec2(1,1);\n offset.y = (offset.x+offset.y) & 1;\n\n offsets.points[0] = shadowScale * vec3(offset + PCFkernel[0], 0.0);\n offsets.points[1] = shadowScale * vec3(offset + 
PCFkernel[1], 0.0);\n offsets.points[2] = shadowScale * vec3(offset + PCFkernel[2], 0.0);\n offsets.points[3] = shadowScale * vec3(offset + PCFkernel[3], 0.0);\n\n return offsets;\n}\n\nfloat evalShadowAttenuationPCF(int cascadeIndex, ShadowSampleOffsets offsets, vec4 shadowTexcoord, float bias) {\n shadowTexcoord.z -= bias;\n float shadowAttenuation = 0.25 * (\n fetchShadow(cascadeIndex, shadowTexcoord.xyz + offsets.points[0]) +\n fetchShadow(cascadeIndex, shadowTexcoord.xyz + offsets.points[1]) +\n fetchShadow(cascadeIndex, shadowTexcoord.xyz + offsets.points[2]) +\n fetchShadow(cascadeIndex, shadowTexcoord.xyz + offsets.points[3])\n );\n return shadowAttenuation;\n}\n\nfloat evalShadowCascadeAttenuation(int cascadeIndex, ShadowSampleOffsets offsets, vec4 shadowTexcoord, float oneMinusNdotL) {\n float bias = getShadowFixedBias(cascadeIndex) + getShadowSlopeBias(cascadeIndex) * oneMinusNdotL;\n return evalShadowAttenuationPCF(cascadeIndex, offsets, shadowTexcoord, bias);\n}\n\nfloat evalShadowAttenuation(vec3 worldLightDir, vec4 worldPosition, float viewDepth, vec3 worldNormal) {\n ShadowSampleOffsets offsets = evalShadowFilterOffsets(worldPosition);\n vec4 cascadeShadowCoords[2];\n cascadeShadowCoords[0] = vec4(0);\n cascadeShadowCoords[1] = vec4(0);\n ivec2 cascadeIndices;\n float cascadeMix = determineShadowCascadesOnPixel(worldPosition, viewDepth, cascadeShadowCoords, cascadeIndices);\n\t\n // Adjust bias if we are at a grazing angle with light\n float oneMinusNdotL = 1.0 - clamp(dot(worldLightDir, worldNormal), 0, 1);\n vec2 cascadeAttenuations = vec2(1.0, 1.0);\n cascadeAttenuations.x = evalShadowCascadeAttenuation(cascadeIndices.x, offsets, cascadeShadowCoords[0], oneMinusNdotL);\n if (cascadeMix > 0.0 && cascadeIndices.y < getShadowCascadeCount()) {\n cascadeAttenuations.y = evalShadowCascadeAttenuation(cascadeIndices.y, offsets, cascadeShadowCoords[1], oneMinusNdotL);\n }\n float attenuation = mix(cascadeAttenuations.x, cascadeAttenuations.y, cascadeMix);\n // Falloff to max distance\n return mix(1.0, attenuation, evalShadowFalloff(viewDepth));\n}\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? 
((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n\n\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\n// the albedo texture\nuniform sampler2D albedoMap;\n\n// the normal texture\nuniform sampler2D normalMap;\n\n// the specular texture\nuniform sampler2D specularMap;\n\n// the depth texture\nuniform sampler2D depthMap;\nuniform sampler2D linearZeyeMap;\n\n// the obscurance texture\nuniform sampler2D obscuranceMap;\n\n// the lighting texture\nuniform sampler2D lightingMap;\n\n\nstruct DeferredFragment {\n vec4 position;\n vec3 normal;\n float metallic;\n vec3 albedo;\n 
float obscurance;\n vec3 fresnel;\n float roughness;\n int mode;\n float scattering;\n float depthVal;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nDeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n vec4 specularVal;\n \n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n specularVal = texture(specularMap, texcoord);\n frag.obscurance = texture(obscuranceMap, texcoord).x;\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n frag.obscurance = min(specularVal.w, frag.obscurance);\n\n if (frag.mode == FRAG_MODE_SCATTERING) {\n frag.scattering = specularVal.x;\n }\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nDeferredFragment unpackDeferredFragmentNoPositionNoAmbient(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n\n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n //frag.emissive = specularVal.xyz;\n frag.obscurance = 1.0;\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 _prevView;\n mat4 _prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// 
positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\nvec4 unpackDeferredPosition(float depthValue, vec2 texcoord) {\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n\n return vec4(evalEyePositionFromZdb(side, depthValue, texcoord), 1.0);\n}\n\n// This method to unpack position is fastesst\nvec4 unpackDeferredPositionFromZdb(vec2 texcoord) {\n float Zdb = texture(depthMap, texcoord).x;\n\treturn unpackDeferredPosition(Zdb, texcoord);\n}\n\nvec4 unpackDeferredPositionFromZeye(vec2 texcoord) {\n float Zeye = -texture(linearZeyeMap, texcoord).x;\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n 
texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0);\n}\n\nDeferredFragment unpackDeferredFragment(DeferredFrameTransform deferredTransform, vec2 texcoord) {\n\n float depthValue = texture(depthMap, texcoord).r;\n\n DeferredFragment frag = unpackDeferredFragmentNoPosition(texcoord);\n\n frag.depthVal = depthValue;\n frag.position = unpackDeferredPosition(frag.depthVal, texcoord);\n\n return frag;\n}\n\n\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\n\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 
attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; 
}\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base 
* base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\n\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the 
accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\n\n\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend 
is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 evalLightmappedColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 normal, vec3 albedo, vec3 lightmap) {\n Light light = getKeyLight();\n LightAmbient ambient = getLightAmbient();\n\n // Catch normals perpendicular to the projection plane, hence the magic number for the threshold\n // It should be just 0, but we have inaccuracy so we 
overshoot\n const float PERPENDICULAR_THRESHOLD = -0.005;\n vec3 fragNormal = vec3(invViewMat * vec4(normal, 0.0)); // transform to worldspace\n float diffuseDot = dot(fragNormal, -getLightDirection(light));\n float facingLight = step(PERPENDICULAR_THRESHOLD, diffuseDot); \n\n // Reevaluate the shadow attenuation for light facing fragments\n float lightAttenuation = (1.0 - facingLight) + facingLight * shadowAttenuation;\n\n // Diffuse light is the lightmap dimmed by shadow\n vec3 diffuseLight = lightAttenuation * lightmap;\n\n // Ambient light is the lightmap when in shadow\n vec3 ambientLight = (1.0 - lightAttenuation) * lightmap * getLightAmbientIntensity(ambient);\n\n return isLightmapEnabled() * obscurance * albedo * (diffuseLight + ambientLight);\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return specularLight;\n}\n\n\nfloat curvatureAO(in float k) {\n return 1.0f - (0.0022f * k * k) + (0.0776f * k) + 0.7369f;\n}\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientSpaceLowNormal = (ambient.transform * vec4(lowNormalCurvature.xyz, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nif (scattering * isScatteringEnabled() > 0.0) {\n float ambientOcclusion = curvatureAO(lowNormalCurvature.w * 20.0f) * 0.5f;\n\n\n float ambientOcclusionHF = curvatureAO(midNormalCurvature.w * 8.0f) * 0.5f;\n ambientOcclusion = min(ambientOcclusion, ambientOcclusionHF);\n\n obscurance = min(obscurance, ambientOcclusion);\n\n // Diffuse from ambient\n diffuse = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceLowNormal).xyz;\n\n // Scattering ambient specular is the same as non scattering for now\n // TODO: we should use the same specular answer as for direct lighting\n }\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 
lowNormalCurvature\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\n// the curvature texture\nuniform sampler2D curvatureMap;\n\nvec4 fetchCurvature(vec2 texcoord) {\n return texture(curvatureMap, texcoord);\n}\n\n// the curvature texture\nuniform sampler2D diffusedCurvatureMap;\n\nvec4 fetchDiffusedCurvature(vec2 texcoord) {\n return texture(diffusedCurvatureMap, texcoord);\n}\n\nvoid unpackMidLowNormalCurvature(vec2 texcoord, out vec4 midNormalCurvature, out vec4 lowNormalCurvature) {\n midNormalCurvature = fetchCurvature(texcoord);\n lowNormalCurvature = fetchDiffusedCurvature(texcoord);\n midNormalCurvature.xyz = normalize((midNormalCurvature.xyz - 0.5f) * 2.0f);\n lowNormalCurvature.xyz = normalize((lowNormalCurvature.xyz - 0.5f) * 2.0f);\n midNormalCurvature.w = (midNormalCurvature.w * 2.0 - 1.0);\n lowNormalCurvature.w = (lowNormalCurvature.w * 2.0 - 1.0);\n}\n\nvec3 evalAmbientSphereGlobalColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal,\nvec3 albedo, vec3 fresnel, float metallic, float roughness\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normal);\n vec3 fragPositionWS = vec3(invViewMat * vec4(position, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n color += ambientDiffuse;\n color += ambientSpecular;\n\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n color += directionalDiffuse;\n color += directionalSpecular;\n\n return color;\n}\n\n\n\nlayout(location = 0) in vec2 _texCoord0;\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n DeferredFrameTransform deferredTransform = getDeferredFrameTransform();\n DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0);\n\n vec4 viewPos = vec4(frag.position.xyz, 1.0);\n vec4 worldPos = getViewInverse() * viewPos;\n Light shadowLight = getKeyLight();\n vec3 worldLightDirection = getLightDirection(shadowLight);\n float shadowAttenuation = evalShadowAttenuation(worldLightDirection, worldPos, -viewPos.z, frag.normal);\n\n if (frag.mode == FRAG_MODE_UNLIT) {\n discard;\n } else if (frag.mode == FRAG_MODE_LIGHTMAPPED) {\n discard;\n } else {\n vec4 midNormalCurvature = vec4(0);\n vec4 lowNormalCurvature = vec4(0);\n if (frag.mode == 
FRAG_MODE_SCATTERING) {\n unpackMidLowNormalCurvature(_texCoord0, midNormalCurvature, lowNormalCurvature);\n }\n vec3 color = evalAmbientSphereGlobalColor(\n getViewInverse(),\n shadowAttenuation,\n frag.obscurance,\n frag.position.xyz,\n frag.normal,\n frag.albedo,\n frag.fresnel,\n frag.metallic,\n frag.roughness,\n frag.scattering,\n midNormalCurvature,\n lowNormalCurvature);\n \n _fragColor = vec4(color, 1.0);\n }\n}\n\n\n"
+ },
+ "bQwmNhUUAqzPWpHuwpBapw==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// deferred_light.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/16/16.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec2 _texCoord0;\n\nuniform vec4 texcoordFrameTransform;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n _texCoord0 = (pos.xy + 1.0) * 0.5;\n\n _texCoord0 *= texcoordFrameTransform.zw;\n _texCoord0 += texcoordFrameTransform.xy;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// directional_skybox_light.frag\n// fragment shader\n//\n// Created by Sam Gateau on 5/8/2015.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * 
FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\n// the albedo texture\nuniform sampler2D albedoMap;\n\n// the normal texture\nuniform sampler2D normalMap;\n\n// the specular texture\nuniform sampler2D specularMap;\n\n// the depth texture\nuniform sampler2D depthMap;\nuniform sampler2D linearZeyeMap;\n\n// the obscurance texture\nuniform sampler2D obscuranceMap;\n\n// the lighting texture\nuniform sampler2D lightingMap;\n\n\nstruct DeferredFragment {\n vec4 position;\n vec3 normal;\n float metallic;\n vec3 albedo;\n float obscurance;\n vec3 fresnel;\n float roughness;\n int mode;\n float scattering;\n float depthVal;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nDeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n vec4 specularVal;\n \n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n specularVal = texture(specularMap, texcoord);\n frag.obscurance = texture(obscuranceMap, texcoord).x;\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n frag.obscurance = min(specularVal.w, frag.obscurance);\n\n if (frag.mode == FRAG_MODE_SCATTERING) {\n frag.scattering = specularVal.x;\n }\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nDeferredFragment unpackDeferredFragmentNoPositionNoAmbient(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n\n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n //frag.emissive = specularVal.xyz;\n frag.obscurance = 1.0;\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 _prevView;\n mat4 
_prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? 
ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n\n\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\nvec4 unpackDeferredPosition(float depthValue, vec2 texcoord) {\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n\n return vec4(evalEyePositionFromZdb(side, depthValue, texcoord), 1.0);\n}\n\n// This method to unpack position is fastesst\nvec4 unpackDeferredPositionFromZdb(vec2 texcoord) {\n float Zdb = texture(depthMap, texcoord).x;\n\treturn unpackDeferredPosition(Zdb, texcoord);\n}\n\nvec4 unpackDeferredPositionFromZeye(vec2 texcoord) {\n float Zeye = -texture(linearZeyeMap, texcoord).x;\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0);\n}\n\nDeferredFragment unpackDeferredFragment(DeferredFrameTransform deferredTransform, vec2 texcoord) {\n\n float depthValue = texture(depthMap, texcoord).r;\n\n DeferredFragment frag = unpackDeferredFragmentNoPosition(texcoord);\n\n frag.depthVal = depthValue;\n frag.position = unpackDeferredPosition(frag.depthVal, texcoord);\n\n return frag;\n}\n\n\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat 
lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, 
lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n 
return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n\n\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular 
Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n\n\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 evalLightmappedColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 normal, vec3 albedo, vec3 lightmap) {\n Light light = getKeyLight();\n LightAmbient ambient = getLightAmbient();\n\n\n\n // Catch normals perpendicular to the projection plane, hence the magic number for the threshold\n // It should be just 0, but we have inaccuracy so we overshoot\n const float PERPENDICULAR_THRESHOLD = -0.005;\n vec3 fragNormal = vec3(invViewMat * vec4(normal, 0.0)); // transform to worldspace\n float diffuseDot = dot(fragNormal, -getLightDirection(light));\n float facingLight = step(PERPENDICULAR_THRESHOLD, diffuseDot); \n\n // Reevaluate the shadow attenuation for light facing fragments\n float lightAttenuation = (1.0 - facingLight) + facingLight * shadowAttenuation;\n\n // Diffuse light is the lightmap dimmed by shadow\n vec3 diffuseLight = lightAttenuation * lightmap;\n\n // Ambient light is the lightmap when in shadow\n vec3 ambientLight = (1.0 - lightAttenuation) * lightmap * getLightAmbientIntensity(ambient);\n\n return isLightmapEnabled() * obscurance * albedo * (diffuseLight + ambientLight);\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float 
f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n return specularLight;\n}\n\n\nfloat curvatureAO(in float k) {\n return 1.0f - (0.0022f * k * k) + (0.0776f * k) + 0.7369f;\n}\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientSpaceLowNormal = (ambient.transform * vec4(lowNormalCurvature.xyz, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nif (scattering * isScatteringEnabled() > 0.0) {\n float ambientOcclusion = curvatureAO(lowNormalCurvature.w * 20.0f) * 0.5f;\n float ambientOcclusionHF = curvatureAO(midNormalCurvature.w * 8.0f) * 0.5f;\n ambientOcclusion = min(ambientOcclusion, ambientOcclusionHF);\n\n obscurance = min(obscurance, ambientOcclusion);\n\n // Diffuse from ambient\n diffuse = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceLowNormal).xyz;\n\n // Scattering ambient specular is the same as non scattering for now\n // TODO: we should use the same specular answer as for direct lighting\n }\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, 
midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\n// the curvature texture\nuniform sampler2D curvatureMap;\n\nvec4 fetchCurvature(vec2 texcoord) {\n return texture(curvatureMap, texcoord);\n}\n\n// the curvature texture\nuniform sampler2D diffusedCurvatureMap;\n\nvec4 fetchDiffusedCurvature(vec2 texcoord) {\n return texture(diffusedCurvatureMap, texcoord);\n}\n\nvoid unpackMidLowNormalCurvature(vec2 texcoord, out vec4 midNormalCurvature, out vec4 lowNormalCurvature) {\n midNormalCurvature = fetchCurvature(texcoord);\n lowNormalCurvature = fetchDiffusedCurvature(texcoord);\n midNormalCurvature.xyz = normalize((midNormalCurvature.xyz - 0.5f) * 2.0f);\n lowNormalCurvature.xyz = normalize((lowNormalCurvature.xyz - 0.5f) * 2.0f);\n midNormalCurvature.w = (midNormalCurvature.w * 2.0 - 1.0);\n lowNormalCurvature.w = (lowNormalCurvature.w * 2.0 - 1.0);\n}\n\nvec3 evalSkyboxGlobalColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal,\n vec3 albedo, vec3 fresnel, float metallic, float roughness\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normal);\n vec3 fragPositionWS = vec3(invViewMat * vec4(position, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance\n,scattering, midNormalCurvature, lowNormalCurvature \n);\n color += ambientDiffuse;\n color += ambientSpecular;\n\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n color += directionalDiffuse;\n color += directionalSpecular;\n\n // Attenuate the light if haze effect selected\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_KEYLIGHT_ATTENUATED) == HAZE_MODE_IS_KEYLIGHT_ATTENUATED) {\n color = computeHazeColorKeyLightAttenuation(color, lightDirection, fragPositionWS); \n }\n\n return color;\n}\n\n\n\nlayout(location = 0) in vec2 _texCoord0;\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n DeferredFrameTransform deferredTransform = getDeferredFrameTransform();\n DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0);\n\n float shadowAttenuation = 1.0;\n\n // Light mapped or not ?\n if (frag.mode == FRAG_MODE_UNLIT) {\n discard;\n } else if (frag.mode == FRAG_MODE_LIGHTMAPPED) {\n discard;\n } else {\n vec4 midNormalCurvature = vec4(0);\n vec4 lowNormalCurvature = vec4(0);\n if (frag.mode == FRAG_MODE_SCATTERING) {\n unpackMidLowNormalCurvature(_texCoord0, midNormalCurvature, lowNormalCurvature);\n }\n vec3 color = evalSkyboxGlobalColor(\n getViewInverse(),\n shadowAttenuation,\n frag.obscurance,\n frag.position.xyz,\n 
frag.normal,\n frag.albedo,\n frag.fresnel,\n frag.metallic,\n frag.roughness,\n frag.scattering,\n midNormalCurvature,\n lowNormalCurvature);\n\n _fragColor = vec4(color, 1.0);\n }\n}\n\n\n"
+ },
+ "bwGVwYvJmCg8vJFz7IoMGA==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_shadow.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer 
{\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n skinPosition(inSkinClusterIndex, inSkinClusterWeight, inPosition, position);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n 
_mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// skin_model_shadow.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
+ "c+ZrVo/zLabK7DoATVqlzQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:23:43 2018\n//\n// blurGaussianH.frag\n//\n// Created by Sam Gateau on 6/7/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:23:43 2018\n//\n// Created by Sam Gateau on 6/7/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:23:43 2018\n//\n// Created by Olivier Prat on 09/25/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n#define BLUR_MAX_NUM_TAPS 33\nstruct BlurParameters {\n vec4 resolutionInfo;\n vec4 texcoordTransform;\n vec4 filterInfo;\n vec4 depthInfo;\n vec4 stereoInfo;\n vec4 linearDepthInfo;\n vec2 taps[BLUR_MAX_NUM_TAPS];\n};\n\nuniform blurParamsBuffer {\n BlurParameters parameters;\n};\n\nvec2 getViewportInvWidthHeight() {\n return parameters.resolutionInfo.zw;\n}\n\nvec2 evalTexcoordTransformed(vec2 texcoord) {\n return (texcoord * parameters.texcoordTransform.zw + parameters.texcoordTransform.xy);\n}\n\nfloat getFilterScale() {\n return parameters.filterInfo.x;\n}\n\nint getFilterNumTaps() {\n return int(parameters.filterInfo.y);\n}\n\nfloat getOutputAlpha() {\n return parameters.filterInfo.z;\n}\n\nvec2 getFilterTap(int index) {\n return parameters.taps[index];\n}\n\nfloat getFilterTapOffset(vec2 tap) {\n return tap.x;\n}\n\nfloat getFilterTapWeight(vec2 tap) {\n return tap.y;\n}\n\nfloat getDepthThreshold() {\n return parameters.depthInfo.x;\n}\n\nfloat getDepthPerspective() {\n return parameters.depthInfo.w;\n}\n\nfloat getPosLinearDepthFar() {\n return parameters.linearDepthInfo.x;\n}\n\n\n\nuniform sampler2D sourceMap;\n\nvec4 pixelShaderGaussian(vec2 texcoord, vec2 direction, vec2 pixelStep) {\n texcoord = evalTexcoordTransformed(texcoord);\n\n vec2 finalStep = getFilterScale() * direction * pixelStep;\n vec4 srcBlurred = vec4(0.0);\n float totalWeight = 0.f;\n int numTaps = getFilterNumTaps();\n \n for(int i = 0; i < numTaps; i++) {\n vec2 tapInfo = getFilterTap(i);\n // Fetch color for current sample.\n vec2 sampleCoord = texcoord + (getFilterTapOffset(tapInfo) * finalStep);\n if (all(greaterThanEqual(sampleCoord, vec2(0,0))) && all(lessThanEqual(sampleCoord, vec2(1.0,1.0)))) {\n vec4 
srcSample = texture(sourceMap, sampleCoord);\n float weight = getFilterTapWeight(tapInfo);\n // Accumulate.\n srcBlurred += srcSample * weight;\n totalWeight += weight;\n }\n }\n \n if (totalWeight>0.0) {\n srcBlurred /= totalWeight;\n }\n srcBlurred.a = getOutputAlpha();\n return srcBlurred;\n}\n\n\n\n\nlayout(location = 0) in vec2 varTexCoord0;\n\nlayout(location = 0) out vec4 outFragColor;\n\nvoid main(void) {\n outFragColor = pixelShaderGaussian(varTexCoord0, vec2(1.0, 0.0), getViewportInvWidthHeight());\n}\n\n\n\n"
+ },
+ "cOLui+erS7m5I6glwc2H/A==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// subsurfaceScattering_makeProfile.frag\n//\n// Created by Sam Gateau on 6/27/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nfloat gaussian(float v, float r) {\n const float _PI = 3.14159265358979523846;\n return (1.0 / sqrt(2.0 * _PI * v)) * exp(-(r*r) / (2.0 * v));\n}\n\nvec3 scatter(float r) {\n // r is the distance expressed in millimeter\n // returns the scatter reflectance\n // Values from GPU Gems 3 \"Advanced Skin Rendering\".\n // Originally taken from real life samples.\n const vec4 profile[6] = vec4[6](\n vec4(0.0064, 0.233, 0.455, 0.649),\n vec4(0.0484, 0.100, 0.336, 0.344),\n vec4(0.1870, 0.118, 0.198, 0.000),\n vec4(0.5670, 0.113, 0.007, 0.007),\n vec4(1.9900, 0.358, 0.004, 0.000),\n vec4(7.4100, 0.078, 0.000, 0.000)\n );\n const int profileNum = 6;\n\n vec3 ret = vec3(0.0);\n for (int i = 0; i < profileNum; i++) {\n float v = profile[i].x * 1.414;\n float g = gaussian(v, r);\n ret += g * profile[i].yzw;\n }\n\n return ret;\n}\n\n\n\nvec3 generateProfile(vec2 uv) {\n return scatter(uv.x * 2.0);\n}\n\n\n\nin vec2 varTexCoord0;\nout vec4 outFragColor;\n\nvoid main(void) {\n outFragColor = vec4(generateProfile(varTexCoord0.xy), 1.0);\n}\n\n\n"
+ },
+ "cuqklOIZ4d1AGklaKR2C0Q==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_shadow_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer 
transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n skinPosition(inSkinClusterIndex, inSkinClusterWeight, inPosition, position);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // 
_transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// skin_model_shadow_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/08/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nin vec4 _positionWS;\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n 
applyFadeClip(fadeParams, _positionWS.xyz);\n\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
+ "d4HgeNo/bw1nDnwvLEqa0g==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_normal_map_fade_dq.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/29/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual 
quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * 
vec4(inNormal, 0));\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _tangentWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n skinPositionNormalTangent(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, inTangent.xyz, position, interpolatedNormal.xyz, interpolatedTangent.xyz);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n interpolatedNormal = vec4(normalize(interpolatedNormal.xyz), 0.0);\n interpolatedTangent = vec4(normalize(interpolatedTangent.xyz), 0.0);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= 
cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedNormal.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedTangent.xyz = vec3(dot(mr0, interpolatedTangent.xyz), dot(mr1, interpolatedTangent.xyz), dot(mr2, interpolatedTangent.xyz));\n }\n\n\n _normalWS = interpolatedNormal.xyz;\n _tangentWS = interpolatedTangent.xyz;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_normal_map_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? 
((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 
0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = 
dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 0) in vec4 _positionES;\nlayout(location = 1) in vec4 _positionWS;\nlayout(location = 2) in vec2 _texCoord0;\nlayout(location = 3) in vec2 _texCoord1;\nlayout(location = 4) in vec3 _normalWS;\nlayout(location = 5) in vec3 _tangentWS;\nlayout(location = 6) in vec3 _color;\n\nvoid main(void) {\n vec3 fadeEmissive;\n 
FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTex = (((matKey & NORMAL_MAP_BIT) != 0) ? fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragNormalWS;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTex, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormalWS = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n\n packDeferredFragment(\n normalize(fragNormalWS.xyz),\n opacity,\n albedo,\n roughness,\n metallic,\n emissive + fadeEmissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "dAktKesn7UY7+h6sq47y3w==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model_translucent.vert\n// vertex shader\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout float _alpha;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec4 _positionES;\nout vec4 _positionWS;\nout vec3 _normalWS;\nout vec3 _color;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= 
vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n// model_translucent_fade.frag\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return 
bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return 
attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n\n\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return 
lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV 
* smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return 
specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in 
world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\nuniform lightBuffer {\n Light lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = 
vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return 
frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? 
-eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? 
(element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n\n\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float 
radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 
_textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\n\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec4 _positionWS;\nin vec3 _normalWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = 
fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragPositionES = _positionES.xyz;\n vec3 fragPositionWS = _positionWS.xyz;\n // Lighting is done in world space\n vec3 fragNormalWS = normalize(_normalWS);\n\n TransformCamera cam = getTransformCamera();\n vec3 fragToEyeWS = cam._viewInverse[3].xyz - fragPositionWS;\n vec3 fragToEyeDirWS = normalize(fragToEyeWS);\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragToEyeDirWS);\n\n vec4 localLighting = vec4(0.0);\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(_positionWS);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (hasLocalLights(numLights, clusterPos, dims)) {\n localLighting = evalLocalLighting(cluster, numLights, fragPositionWS, surfaceWS,\n metallic, fresnel, albedo, 0.0,\n vec4(0), vec4(0), opacity);\n }\n\n _fragColor = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPositionES,\n fragPositionWS,\n albedo,\n fresnel,\n metallic,\n emissive + fadeEmissive,\n surfaceWS, opacity, localLighting.rgb),\n opacity);\n}\n\n\n"
+ },
+ "dhhvXoOvTtCA5UTcdZK1+g==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// model_lightmap_normal_map.vert\n// vertex shader\n//\n// Created by Sam Gateau on 11/21/14.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec2 _texCoord0;\nlayout(location = 2) out vec2 _texCoord1;\nlayout(location = 3) out vec3 _normalWS;\nlayout(location = 4) out vec3 _tangentWS;\nlayout(location = 5) out vec3 _color;\n\nvoid main(void) {\n // pass along the color in linear space\n _color = color_sRGBToLinear(inColor.xyz);\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord1.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n 
_positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _tangentWS = vec3(dot(mr0, inTangent.xyz), dot(mr1, inTangent.xyz), dot(mr2, inTangent.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_lightmap_normal_map.frag\n// fragment shader\n//\n// Created by Samuel Gateau on 11/19/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if 
(rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return 
lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * 
angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\n#endif\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nuniform sampler2D emissiveMap;\nvec3 fetchLightmapMap(vec2 uv) {\n vec2 emissiveParams = getTexMapArray()._lightmapParams.xy;\n return (vec3(emissiveParams.x) + emissiveParams.y * texture(emissiveMap, uv).rgb);\n}\n\n\nlayout(location = 0) in vec4 _positionES;\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 2) in vec2 _texCoord1;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _tangentWS;\nlayout(location = 5) in vec3 _color;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedo = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTexel = (((matKey & NORMAL_MAP_BIT) != 0) ? fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? 
fetchMetallicMap(_texCoord0) : 0.0);\n\n vec3 lightmapVal = fetchLightmapMap(_texCoord1);\n\n \n vec3 fragNormal;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTexel, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormal = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n packDeferredFragmentLightmap(\n normalize(fragNormal.xyz),\n evalOpaqueFinalAlpha(getMaterialOpacity(mat), albedo.a),\n getMaterialAlbedo(mat) * albedo.rgb * _color,\n getMaterialRoughness(mat) * roughness,\n getMaterialMetallic(mat) * metallicTex,\n /*specular, // no use of */ getMaterialFresnel(mat),\n lightmapVal);\n}\n\n\n"
+ },
+ "dnlqL9nMiIkDsF1B+zt2dw==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// overlay3D.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/16/15.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nout vec3 _color;\nout float _alpha;\nout vec2 _texCoord0;\nout vec4 _positionES;\nout vec3 _normalWS;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n _texCoord0 = inTexCoord0.st;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, 
inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// overlay3D_translucent_unlit.frag\n// fragment shader\n//\n// Created by Zach Pomerantz on 2/2/2016.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nuniform sampler2D originalTexture;\n\nin vec2 _texCoord0;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec4 albedo = texture(originalTexture, _texCoord0);\n\n _fragColor = vec4(albedo.rgb * _color, albedo.a * _alpha);\n}\n\n\n"
+ },
+ "e0Pe74cfrY87BUGhMWpd+A==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// deferred_light.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/16/16.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec2 _texCoord0;\n\nuniform vec4 texcoordFrameTransform;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n _texCoord0 = (pos.xy + 1.0) * 0.5;\n\n _texCoord0 *= texcoordFrameTransform.zw;\n _texCoord0 += texcoordFrameTransform.xy;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// directional_skybox_light_shadow.frag\n// fragment shader\n//\n// Created by Zach Pomerantz on 1/18/2016.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//!>\n\n// glsl / C++ compatible source as interface for Shadows\n#ifdef __cplusplus\n# define MAT4 glm::mat4\n#else\n# define MAT4 mat4\n#endif\n\n#define SHADOW_CASCADE_MAX_COUNT 4\n\nstruct ShadowTransform {\n\tMAT4 reprojection;\n\tfloat fixedBias;\n float slopeBias;\n float _padding1;\n float _padding2;\n};\n\nstruct ShadowParameters {\n ShadowTransform cascades[SHADOW_CASCADE_MAX_COUNT];\n int cascadeCount;\n float invMapSize;\n float invCascadeBlendWidth;\n float maxDistance;\n float invFalloffDistance;\n};\n\n// //\nlayout(std140) uniform shadowTransformBuffer {\n\tShadowParameters shadow;\n};\n\nint getShadowCascadeCount() {\n return shadow.cascadeCount;\n}\n\nfloat getShadowCascadeInvBlendWidth() {\n return shadow.invCascadeBlendWidth;\n}\n\nfloat evalShadowFalloff(float depth) {\n return clamp((shadow.maxDistance-depth) * shadow.invFalloffDistance, 0.0, 1.0);\n}\n\nmat4 getShadowReprojection(int cascadeIndex) {\n\treturn shadow.cascades[cascadeIndex].reprojection;\n}\n\nfloat getShadowScale() {\n\treturn shadow.invMapSize;\n}\n\nfloat getShadowFixedBias(int cascadeIndex) {\n\treturn shadow.cascades[cascadeIndex].fixedBias;\n}\n\nfloat getShadowSlopeBias(int cascadeIndex) {\n\treturn shadow.cascades[cascadeIndex].slopeBias;\n}\n\n\n// Compute the texture coordinates from world coordinates\nvec4 evalShadowTexcoord(int cascadeIndex, vec4 position) {\n\tvec4 shadowCoord = getShadowReprojection(cascadeIndex) * position;\n\treturn vec4(shadowCoord.xyz, 1.0);\n}\n\nbool isShadowCascadeProjectedOnPixel(vec4 cascadeTexCoords) {\n bvec2 greaterThanZero = greaterThan(cascadeTexCoords.xy, vec2(0));\n bvec2 lessThanOne = lessThan(cascadeTexCoords.xy, vec2(1));\n return all(greaterThanZero) && all(lessThanOne);\n}\n\nint getFirstShadowCascadeOnPixel(int startCascadeIndex, vec4 worldPosition, out vec4 cascadeShadowCoords) {\n int cascadeIndex;\n startCascadeIndex = min(startCascadeIndex, getShadowCascadeCount()-1);\n for (cascadeIndex=startCascadeIndex ; cascadeIndex> 1;\n#endif\n\n // Offset for efficient PCF, see http://http.developer.nvidia.com/GPUGems/gpugems_ch11.html\n ivec2 offset = coords & ivec2(1,1);\n offset.y = (offset.x+offset.y) & 1;\n\n offsets.points[0] = shadowScale * vec3(offset + PCFkernel[0], 0.0);\n offsets.points[1] = shadowScale * vec3(offset + 
PCFkernel[1], 0.0);\n offsets.points[2] = shadowScale * vec3(offset + PCFkernel[2], 0.0);\n offsets.points[3] = shadowScale * vec3(offset + PCFkernel[3], 0.0);\n\n return offsets;\n}\n\nfloat evalShadowAttenuationPCF(int cascadeIndex, ShadowSampleOffsets offsets, vec4 shadowTexcoord, float bias) {\n shadowTexcoord.z -= bias;\n float shadowAttenuation = 0.25 * (\n fetchShadow(cascadeIndex, shadowTexcoord.xyz + offsets.points[0]) +\n fetchShadow(cascadeIndex, shadowTexcoord.xyz + offsets.points[1]) +\n fetchShadow(cascadeIndex, shadowTexcoord.xyz + offsets.points[2]) +\n fetchShadow(cascadeIndex, shadowTexcoord.xyz + offsets.points[3])\n );\n return shadowAttenuation;\n}\n\nfloat evalShadowCascadeAttenuation(int cascadeIndex, ShadowSampleOffsets offsets, vec4 shadowTexcoord, float oneMinusNdotL) {\n float bias = getShadowFixedBias(cascadeIndex) + getShadowSlopeBias(cascadeIndex) * oneMinusNdotL;\n return evalShadowAttenuationPCF(cascadeIndex, offsets, shadowTexcoord, bias);\n}\n\nfloat evalShadowAttenuation(vec3 worldLightDir, vec4 worldPosition, float viewDepth, vec3 worldNormal) {\n ShadowSampleOffsets offsets = evalShadowFilterOffsets(worldPosition);\n vec4 cascadeShadowCoords[2];\n cascadeShadowCoords[0] = vec4(0);\n cascadeShadowCoords[1] = vec4(0);\n ivec2 cascadeIndices;\n float cascadeMix = determineShadowCascadesOnPixel(worldPosition, viewDepth, cascadeShadowCoords, cascadeIndices);\n\t\n // Adjust bias if we are at a grazing angle with light\n float oneMinusNdotL = 1.0 - clamp(dot(worldLightDir, worldNormal), 0, 1);\n vec2 cascadeAttenuations = vec2(1.0, 1.0);\n cascadeAttenuations.x = evalShadowCascadeAttenuation(cascadeIndices.x, offsets, cascadeShadowCoords[0], oneMinusNdotL);\n if (cascadeMix > 0.0 && cascadeIndices.y < getShadowCascadeCount()) {\n cascadeAttenuations.y = evalShadowCascadeAttenuation(cascadeIndices.y, offsets, cascadeShadowCoords[1], oneMinusNdotL);\n }\n float attenuation = mix(cascadeAttenuations.x, cascadeAttenuations.y, cascadeMix);\n // Falloff to max distance\n return mix(1.0, attenuation, evalShadowFalloff(viewDepth));\n}\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? 
((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n\n\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\n// the albedo texture\nuniform sampler2D albedoMap;\n\n// the normal texture\nuniform sampler2D normalMap;\n\n// the specular texture\nuniform sampler2D specularMap;\n\n// the depth texture\nuniform sampler2D depthMap;\nuniform sampler2D linearZeyeMap;\n\n// the obscurance texture\nuniform sampler2D obscuranceMap;\n\n// the lighting texture\nuniform sampler2D lightingMap;\n\n\nstruct DeferredFragment {\n vec4 position;\n vec3 normal;\n float metallic;\n vec3 albedo;\n 
float obscurance;\n vec3 fresnel;\n float roughness;\n int mode;\n float scattering;\n float depthVal;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nDeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n vec4 specularVal;\n \n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n specularVal = texture(specularMap, texcoord);\n frag.obscurance = texture(obscuranceMap, texcoord).x;\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n frag.obscurance = min(specularVal.w, frag.obscurance);\n\n if (frag.mode == FRAG_MODE_SCATTERING) {\n frag.scattering = specularVal.x;\n }\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nDeferredFragment unpackDeferredFragmentNoPositionNoAmbient(vec2 texcoord) {\n vec4 normalVal;\n vec4 diffuseVal;\n\n DeferredFragment frag;\n frag.depthVal = -1.0;\n normalVal = texture(normalMap, texcoord);\n diffuseVal = texture(albedoMap, texcoord);\n\n // Unpack the normal from the map\n frag.normal = unpackNormal(normalVal.xyz);\n frag.roughness = normalVal.a;\n\n // Diffuse color and unpack the mode and the metallicness\n frag.albedo = diffuseVal.xyz;\n frag.scattering = 0.0;\n unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);\n\n //frag.emissive = specularVal.xyz;\n frag.obscurance = 1.0;\n\n frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);\n\n return frag;\n}\n\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 _prevView;\n mat4 _prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// 
positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\nvec4 unpackDeferredPosition(float depthValue, vec2 texcoord) {\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n\n return vec4(evalEyePositionFromZdb(side, depthValue, texcoord), 1.0);\n}\n\n// This method to unpack position is fastesst\nvec4 unpackDeferredPositionFromZdb(vec2 texcoord) {\n float Zdb = texture(depthMap, texcoord).x;\n\treturn unpackDeferredPosition(Zdb, texcoord);\n}\n\nvec4 unpackDeferredPositionFromZeye(vec2 texcoord) {\n float Zeye = -texture(linearZeyeMap, texcoord).x;\n int side = 0;\n if (isStereo()) {\n if (texcoord.x > 0.5) {\n 
texcoord.x -= 0.5;\n side = 1;\n }\n texcoord.x *= 2.0;\n }\n return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0);\n}\n\nDeferredFragment unpackDeferredFragment(DeferredFrameTransform deferredTransform, vec2 texcoord) {\n\n float depthValue = texture(depthMap, texcoord).r;\n\n DeferredFragment frag = unpackDeferredFragmentNoPosition(texcoord);\n\n frag.depthVal = depthValue;\n frag.position = unpackDeferredPosition(frag.depthVal, texcoord);\n\n return frag;\n}\n\n\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\n\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 
attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; 
}\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base 
* base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\n\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the 
accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\n\n\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend 
is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 evalLightmappedColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 normal, vec3 albedo, vec3 lightmap) {\n Light light = getKeyLight();\n LightAmbient ambient = getLightAmbient();\n\n // Catch normals perpendicular to the projection plane, hence the magic number for the threshold\n // It should be just 0, but we have inaccuracy so we 
overshoot\n const float PERPENDICULAR_THRESHOLD = -0.005;\n vec3 fragNormal = vec3(invViewMat * vec4(normal, 0.0)); // transform to worldspace\n float diffuseDot = dot(fragNormal, -getLightDirection(light));\n float facingLight = step(PERPENDICULAR_THRESHOLD, diffuseDot); \n\n // Reevaluate the shadow attenuation for light facing fragments\n float lightAttenuation = (1.0 - facingLight) + facingLight * shadowAttenuation;\n\n // Diffuse light is the lightmap dimmed by shadow\n vec3 diffuseLight = lightAttenuation * lightmap;\n\n // Ambient light is the lightmap when in shadow\n vec3 ambientLight = (1.0 - lightAttenuation) * lightmap * getLightAmbientIntensity(ambient);\n\n return isLightmapEnabled() * obscurance * albedo * (diffuseLight + ambientLight);\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n return specularLight;\n}\n\n\nfloat curvatureAO(in float k) {\n return 1.0f - (0.0022f * k * k) + (0.0776f * k) + 0.7369f;\n}\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientSpaceLowNormal = (ambient.transform * vec4(lowNormalCurvature.xyz, 0.0)).xyz;\n\n\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nif (scattering * isScatteringEnabled() > 0.0) {\n float ambientOcclusion = curvatureAO(lowNormalCurvature.w * 20.0f) * 0.5f;\n float ambientOcclusionHF = curvatureAO(midNormalCurvature.w * 8.0f) * 0.5f;\n ambientOcclusion = min(ambientOcclusion, ambientOcclusionHF);\n\n obscurance = min(obscurance, ambientOcclusion);\n\n // Diffuse from ambient\n diffuse = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceLowNormal).xyz;\n\n // Scattering ambient specular is the same as non scattering for now\n // TODO: we should use the same 
specular answer as for direct lighting\n }\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\n// the curvature texture\nuniform sampler2D curvatureMap;\n\nvec4 fetchCurvature(vec2 texcoord) {\n return texture(curvatureMap, texcoord);\n}\n\n// the curvature texture\nuniform sampler2D diffusedCurvatureMap;\n\nvec4 fetchDiffusedCurvature(vec2 texcoord) {\n return texture(diffusedCurvatureMap, texcoord);\n}\n\nvoid unpackMidLowNormalCurvature(vec2 texcoord, out vec4 midNormalCurvature, out vec4 lowNormalCurvature) {\n midNormalCurvature = fetchCurvature(texcoord);\n lowNormalCurvature = fetchDiffusedCurvature(texcoord);\n midNormalCurvature.xyz = normalize((midNormalCurvature.xyz - 0.5f) * 2.0f);\n lowNormalCurvature.xyz = normalize((lowNormalCurvature.xyz - 0.5f) * 2.0f);\n midNormalCurvature.w = (midNormalCurvature.w * 2.0 - 1.0);\n lowNormalCurvature.w = (lowNormalCurvature.w * 2.0 - 1.0);\n}\n\nvec3 evalSkyboxGlobalColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal,\n vec3 albedo, vec3 fresnel, float metallic, float roughness\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normal);\n vec3 fragPositionWS = vec3(invViewMat * vec4(position, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance\n,scattering, midNormalCurvature, lowNormalCurvature \n);\n color += ambientDiffuse;\n color += ambientSpecular;\n\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n color += directionalDiffuse;\n color += directionalSpecular;\n\n // Attenuate the light if haze effect selected\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_KEYLIGHT_ATTENUATED) == HAZE_MODE_IS_KEYLIGHT_ATTENUATED) {\n color = computeHazeColorKeyLightAttenuation(color, 
lightDirection, fragPositionWS); \n }\n\n return color;\n}\n\n\n\nlayout(location = 0) in vec2 _texCoord0;\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n DeferredFrameTransform deferredTransform = getDeferredFrameTransform();\n DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0);\n\n vec4 viewPos = vec4(frag.position.xyz, 1.0);\n vec4 worldPos = getViewInverse() * viewPos;\n Light shadowLight = getKeyLight();\n vec3 worldLightDirection = getLightDirection(shadowLight);\n float shadowAttenuation = evalShadowAttenuation(worldLightDirection, worldPos, -viewPos.z, frag.normal);\n\n // Light mapped or not ?\n if (frag.mode == FRAG_MODE_UNLIT) {\n discard;\n } else if (frag.mode == FRAG_MODE_LIGHTMAPPED) {\n discard;\n } else {\n vec4 midNormalCurvature = vec4(0);\n vec4 lowNormalCurvature = vec4(0);\n if (frag.mode == FRAG_MODE_SCATTERING) {\n unpackMidLowNormalCurvature(_texCoord0, midNormalCurvature, lowNormalCurvature);\n }\n vec3 color = evalSkyboxGlobalColor(\n getViewInverse(),\n shadowAttenuation,\n frag.obscurance,\n frag.position.xyz,\n frag.normal,\n frag.albedo,\n frag.fresnel,\n frag.metallic,\n frag.roughness,\n frag.scattering,\n midNormalCurvature,\n lowNormalCurvature);\n\n\n _fragColor = vec4(color, 1.0);\n }\n}\n\n\n"
+ },
+ "eKhbZNIqwNLpthQKQUjZSg==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_normal_map_dq.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/29/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual 
quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * 
vec4(inNormal, 0));\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _tangentWS;\nout vec3 _color;\nout float _alpha;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n skinPositionNormalTangent(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, inTangent.xyz, position, interpolatedNormal.xyz, interpolatedTangent.xyz);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n interpolatedNormal = vec4(normalize(interpolatedNormal.xyz), 0.0);\n interpolatedTangent = vec4(normalize(interpolatedTangent.xyz), 0.0);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= 
cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedNormal.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedTangent.xyz = vec3(dot(mr0, interpolatedTangent.xyz), dot(mr1, interpolatedTangent.xyz), dot(mr2, interpolatedTangent.xyz));\n }\n\n\n _normalWS = interpolatedNormal.xyz;\n _tangentWS = interpolatedTangent.xyz;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_normal_map.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 5/6/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? 
((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 
0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = 
dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#define scatteringMap 6\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(tableTex(matTex, scatteringMap), uv).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return tableTexValue(matTex, scatteringMap, uv).r; // boolean scattering for now\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\nuniform sampler2D scatteringMap;\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(scatteringMap, uv, TAA_TEXTURE_LOD_BIAS).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return texture(scatteringMap, uv).r; // boolean scattering for now\n}\n#endif\n\n\n\nlayout(location = 0) in vec4 _positionES;\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 2) in vec2 _texCoord1;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _tangentWS;\nlayout(location = 5) in vec3 _color;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTex = (((matKey & NORMAL_MAP_BIT) != 0) ? 
fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\nfloat scatteringTex = (((matKey & SCATTERING_MAP_BIT) != 0) ? fetchScatteringMap(_texCoord0) : 0.0);\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragNormalWS;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTex, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormalWS = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n {\n scattering = (((matKey & SCATTERING_MAP_BIT) != 0) ? scatteringTex : scattering);\n}\n;\n\n packDeferredFragment(\n normalize(fragNormalWS.xyz),\n opacity,\n albedo,\n roughness,\n metallic,\n emissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "eXnWX1gX5SpBEzZA1SEWsw==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/14/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec2 _texCoord0;\nlayout(location = 2) out vec2 _texCoord1;\nlayout(location = 3) out vec3 _normalWS;\nlayout(location = 4) out vec3 _color;\nlayout(location = 5) out float _alpha;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n 
\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// model_translucent_unlit.frag\n// fragment shader\n//\n// Created by Zach Pomerantz on 2/3/2016.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, 
albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n 
return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nin vec2 _texCoord0;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n _fragColor = vec4(albedo * isUnlitEnabled(), opacity);\n}\n\n\n"
+ },
+ "fHAYL5kFcZV9YTBxlTKatw==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/14/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec2 _texCoord0;\nlayout(location = 2) out vec2 _texCoord1;\nlayout(location = 3) out vec3 _normalWS;\nlayout(location = 4) out vec3 _color;\nlayout(location = 5) out float _alpha;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n 
\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// model_translucent_unlit.frag\n// fragment shader\n//\n// Created by Zach Pomerantz on 2/3/2016.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, 
albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n 
return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nin vec2 _texCoord0;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n _fragColor = vec4(albedo * isUnlitEnabled(), opacity);\n}\n\n\n"
+ },
+ "gGKhR05vzJLx7EjPR9F1tA==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// model_lightmap_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec2 _texCoord0;\nlayout(location = 2) out vec2 _texCoord1;\nlayout(location = 3) out vec3 _normalWS;\nlayout(location = 4) out vec3 _color;\nlayout(location = 5) out vec4 _positionWS;\n\nvoid main(void) {\n // pass along the color in linear space\n _color = color_sRGBToLinear(inColor.xyz);\n\n // and the texture coordinates\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord1.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = 
cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_lightmap_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * 
FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 
lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar 
color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\n#endif\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nuniform sampler2D emissiveMap;\nvec3 fetchLightmapMap(vec2 uv) {\n vec2 emissiveParams = getTexMapArray()._lightmapParams.xy;\n return (vec3(emissiveParams.x) + emissiveParams.y * texture(emissiveMap, uv).rgb);\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 2) in vec2 _texCoord1;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _color;\nlayout(location = 5) in vec4 _positionWS;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = 
fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedo = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\n\n vec3 lightmapVal = fetchLightmapMap(_texCoord1);\n\n\n packDeferredFragmentLightmap(\n normalize(_normalWS), \n evalOpaqueFinalAlpha(getMaterialOpacity(mat), albedo.a),\n getMaterialAlbedo(mat) * albedo.rgb * _color,\n getMaterialRoughness(mat) * roughness,\n getMaterialMetallic(mat) * metallicTex,\n /*metallicTex, // no use of */getMaterialFresnel(mat),\n lightmapVal+fadeEmissive);\n}\n\n\n"
+ },
+ "grRVm4UWiHKN4KkQrURvQA==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawTextureMirroredX.frag\n//\n// Draw texture 0 fetched at (1.0 - texcoord.x, texcoord.y)\n//\n// Created by Sam Gondelman on 10/24/2017\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\nuniform sampler2D colorMap;\n\nlayout(location = 0) in vec2 varTexCoord0;\nlayout(location = 0) out vec4 outFragColor;\n\nvoid main(void) {\n outFragColor = texture(colorMap, vec2(1.0 - varTexCoord0.x, varTexCoord0.y));\n}\n\n\n"
+ },
+ "hBKI+uiq2k/kRg0/lQeo3g==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_shadow_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer 
transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n skinPosition(inSkinClusterIndex, inSkinClusterWeight, inPosition, position);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // 
_transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_shadow_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 0) in vec4 _positionWS;\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = 
fadeBaseInvSize;\n\n applyFadeClip(fadeParams, _positionWS.xyz);\n\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
+ "huZ58zhKPN7/losUzJUE3A==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawTexture.frag\n//\n// Draw texture 0 fetched at texcoord.xy\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\nuniform sampler2D colorMap;\n\nlayout(location = 0) in vec2 varTexCoord0;\nlayout(location = 0) out vec4 outFragColor;\n\nvoid main(void) {\n outFragColor = texture(colorMap, varTexCoord0);\n}\n\n\n"
+ },
+ "i8tBs+bcbYh7HTWMDox53A==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_normal_map_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid 
skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _tangentWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n skinPositionNormalTangent(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, inTangent.xyz, position, interpolatedNormal.xyz, interpolatedTangent.xyz);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n interpolatedNormal = vec4(normalize(interpolatedNormal.xyz), 0.0);\n interpolatedTangent = vec4(normalize(interpolatedTangent.xyz), 0.0);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n\n\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedNormal.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 
mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedTangent.xyz = vec3(dot(mr0, interpolatedTangent.xyz), dot(mr1, interpolatedTangent.xyz), dot(mr2, interpolatedTangent.xyz));\n }\n\n\n _normalWS = interpolatedNormal.xyz;\n _tangentWS = interpolatedTangent.xyz;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_translucent_normal_map.frag\n// fragment shader\n//\n// Created by Olivier Prat on 23/01/2018.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * 
lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool 
light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n\n\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat 
isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident 
angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return 
specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in 
world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\nuniform lightBuffer {\n Light lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = 
vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return 
frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? 
-eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? 
(element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n\n\n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float 
radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 
_textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec4 _positionWS;\nin vec3 _normalWS;\nin vec3 _tangentWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n Material mat = getMaterial();\n int matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\n\n\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTex = (((matKey & NORMAL_MAP_BIT) != 0) ? fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? 
albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragPositionES = _positionES.xyz;\n vec3 fragPositionWS = _positionWS.xyz;\n // Lighting is done in world space\n vec3 fragNormalWS;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTex, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormalWS = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n TransformCamera cam = getTransformCamera();\n vec3 fragToEyeWS = cam._viewInverse[3].xyz - fragPositionWS;\n vec3 fragToEyeDirWS = normalize(fragToEyeWS);\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragToEyeDirWS);\n\n vec4 localLighting = vec4(0.0);\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(_positionWS);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (hasLocalLights(numLights, clusterPos, dims)) {\n localLighting = evalLocalLighting(cluster, numLights, fragPositionWS, surfaceWS,\n metallic, fresnel, albedo, 0.0,\n vec4(0), vec4(0), opacity);\n }\n\n _fragColor = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPositionES,\n\t\tfragPositionWS,\n albedo,\n fresnel,\n metallic,\n emissive,\n surfaceWS, opacity, localLighting.rgb),\n opacity);\n}\n\n\n"
+ },
+ "iBB5qMCPEJ9z18yrXsFeug==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// subsurfaceScattering_makeProfile.frag\n//\n// Created by Sam Gateau on 6/27/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nfloat gaussian(float v, float r) {\n const float _PI = 3.14159265358979523846;\n return (1.0 / sqrt(2.0 * _PI * v)) * exp(-(r*r) / (2.0 * v));\n}\n\nvec3 scatter(float r) {\n // r is the distance expressed in millimeter\n // returns the scatter reflectance\n // Values from GPU Gems 3 \"Advanced Skin Rendering\".\n // Originally taken from real life samples.\n const vec4 profile[6] = vec4[6](\n vec4(0.0064, 0.233, 0.455, 0.649),\n vec4(0.0484, 0.100, 0.336, 0.344),\n vec4(0.1870, 0.118, 0.198, 0.000),\n vec4(0.5670, 0.113, 0.007, 0.007),\n vec4(1.9900, 0.358, 0.004, 0.000),\n vec4(7.4100, 0.078, 0.000, 0.000)\n );\n const int profileNum = 6;\n\n vec3 ret = vec3(0.0);\n for (int i = 0; i < profileNum; i++) {\n float v = profile[i].x * 1.414;\n float g = gaussian(v, r);\n ret += g * profile[i].yzw;\n }\n\n return ret;\n}\n\n\n\nvec3 generateProfile(vec2 uv) {\n return scatter(uv.x * 2.0);\n}\n\n\n\nin vec2 varTexCoord0;\nout vec4 outFragColor;\n\nvoid main(void) {\n outFragColor = vec4(generateProfile(varTexCoord0.xy), 1.0);\n}\n\n\n"
+ },
+ "iNjIRgAkEKJSdds4Zoo9Bg==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawViewportQuatTransformTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] filling in \n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = 
texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, 0.0, 1.0),\n vec4(1.0, -1.0, 0.0, 1.0),\n vec4(-1.0, 1.0, 0.0, 1.0),\n vec4(1.0, 1.0, 0.0, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n // standard transform but applied to the Texcoord\n vec4 tc = vec4((pos.xy + 1.0) * 0.5, pos.zw);\n\n TransformObject obj = getTransformObject();\n { // transformModelToWorldPos\n tc = (obj._model * tc);\n }\n\n\n gl_Position = pos;\n varTexCoord0 = tc.xy;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// surfaceGeometry_makeCurvature.frag\n//\n// Created by Sam Gateau on 6/3/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nstruct CameraCorrection {\n mat4 _correction;\n mat4 _correctionInverse;\n \n mat4 _prevView;\n mat4 _prevViewInverse;\n};\n \nuniform cameraCorrectionBuffer {\n CameraCorrection cameraCorrection;\n};\n\nstruct DeferredFrameTransform {\n vec4 _pixelInfo;\n vec4 _invPixelInfo;\n vec4 _depthInfo;\n vec4 _stereoInfo;\n mat4 _projection[2];\n mat4 _invProjection[2];\n mat4 _projectionMono;\n mat4 _viewInverse;\n mat4 _view;\n\tmat4 _projectionUnJittered[2];\n\tmat4 _invProjectionUnJittered[2];\n};\n\nuniform deferredFrameTransformBuffer {\n DeferredFrameTransform frameTransform;\n};\n\nvec2 getWidthHeight(int resolutionLevel) {\n return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);\n}\n\nvec2 getInvWidthHeight() {\n return frameTransform._invPixelInfo.xy;\n}\n\nfloat getProjScaleEye() {\n return frameTransform._projection[0][1][1];\n}\n\nfloat getProjScale(int resolutionLevel) {\n return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;\n}\nmat4 getProjection(int side) {\n return frameTransform._projection[side];\n}\nmat4 getProjectionMono() {\n return frameTransform._projectionMono;\n}\nmat4 getUnjitteredProjection(int side) {\n\treturn frameTransform._projectionUnJittered[side];\n}\nmat4 getUnjitteredInvProjection(int side) {\n\treturn frameTransform._invProjectionUnJittered[side];\n}\n\n// positive near distance of the projection\nfloat getProjectionNear() {\n float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];\n float planeD = frameTransform._projection[0][3][2];\n return planeD / planeC;\n}\n\n// positive far distance of the projection\nfloat getPosLinearDepthFar() {\n return -frameTransform._depthInfo.z;\n}\n\nmat4 getViewInverse() {\n return frameTransform._viewInverse * cameraCorrection._correctionInverse;\n}\n\nmat4 getView() {\n return cameraCorrection._correction * frameTransform._view;\n}\n\nmat4 getPreviousView() {\n return cameraCorrection._prevView;\n}\n\nmat4 getPreviousViewInverse() {\n return 
cameraCorrection._prevViewInverse;\n}\n\nDeferredFrameTransform getDeferredFrameTransform() {\n DeferredFrameTransform result = frameTransform;\n result._view = getView(); \n result._viewInverse = getViewInverse(); \n return result;\n}\n\nbool isStereo() {\n return frameTransform._stereoInfo.x > 0.0f;\n}\n\nfloat getStereoSideWidth(int resolutionLevel) {\n return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);\n}\nfloat getStereoSideHeight(int resolutionLevel) {\n return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);\n}\n\nvec2 getSideImageSize(int resolutionLevel) {\n return vec2(float(int(frameTransform._stereoInfo.y) >> resolutionLevel), float(int(frameTransform._pixelInfo.w) >> resolutionLevel));\n}\n\nivec4 getStereoSideInfo(int xPos, int resolutionLevel) {\n int sideWidth = int(getStereoSideWidth(resolutionLevel));\n return ivec4(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());\n}\n\nfloat evalZeyeFromZdb(float depth) {\n return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);\n}\n\nfloat evalZdbFromZeye(float Zeye) {\n return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);\n}\n\nvec3 evalEyeNormal(vec3 C) {\n return normalize(cross(dFdx(C), dFdy(C)));\n}\n\nvec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {\n // compute the view space position using the depth\n vec3 clipPos;\n clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;\n vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);\n return eyePos.xyz / eyePos.w;\n}\n\nvec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {\n\tfloat Zdb = evalZdbFromZeye(Zeye);\n return evalEyePositionFromZdb(side, Zdb, texcoord);\n}\n\nivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {\n ivec2 fragPos = ivec2(glFragCoord.xy);\n\n stereoSide = getStereoSideInfo(fragPos.x, 0);\n\n pixelPos = fragPos;\n pixelPos.x -= stereoSide.y;\n\n texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();\n \n return fragPos;\n}\n\n\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? 
((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\nstruct SurfaceGeometryParams {\n // Resolution info\n vec4 resolutionInfo;\n // Curvature algorithm\n vec4 curvatureInfo;\n};\n\nuniform surfaceGeometryParamsBuffer {\n SurfaceGeometryParams params;\n};\n\nfloat getCurvatureDepthThreshold() {\n return params.curvatureInfo.x;\n}\n\nfloat getCurvatureBasisScale() {\n return params.curvatureInfo.y;\n}\n\nfloat getCurvatureScale() {\n return params.curvatureInfo.w;\n}\n\nbool isFullResolution() {\n return params.resolutionInfo.w == 0.0;\n}\n\n\nuniform sampler2D linearDepthMap;\nfloat getZEye(ivec2 pixel) {\n return -texelFetch(linearDepthMap, pixel, 0).x;\n}\nfloat getZEyeLinear(vec2 texcoord) {\n return -texture(linearDepthMap, texcoord).x;\n}\n\nvec2 sideToFrameTexcoord(vec2 side, vec2 texcoordPos) {\n return vec2((texcoordPos.x + side.x) * side.y, texcoordPos.y);\n}\n\nuniform sampler2D normalMap;\n\nvec3 getRawNormal(vec2 texcoord) {\n return texture(normalMap, texcoord).xyz;\n}\n\nvec3 getWorldNormal(vec2 texcoord) {\n vec3 rawNormal = getRawNormal(texcoord);\n if (isFullResolution()) {\n return unpackNormal(rawNormal);\n } else {\n return normalize((rawNormal - vec3(0.5)) * 2.0);\n }\n}\n\nvec3 getWorldNormalDiff(vec2 texcoord, vec2 delta) {\n return getWorldNormal(texcoord + delta) - getWorldNormal(texcoord - delta);\n}\n\nfloat getEyeDepthDiff(vec2 texcoord, vec2 delta) {\n return getZEyeLinear(texcoord + delta) - getZEyeLinear(texcoord - delta);\n}\n\n\n\nin vec2 varTexCoord0;\nout vec4 outFragColor;\n\nvoid main(void) {\n // Pixel being shaded\n ivec2 pixelPos;\n vec2 texcoordPos;\n ivec4 stereoSide;\n ivec2 framePixelPos = getPixelPosTexcoordPosAndSide(gl_FragCoord.xy, pixelPos, texcoordPos, stereoSide);\n vec2 stereoSideClip = vec2(stereoSide.x, (isStereo() ? 
0.5 : 1.0));\n\n // Texcoord to fetch in the deferred texture are the exact UVs comming from vertex shader\n // sideToFrameTexcoord(stereoSideClip, texcoordPos);\n vec2 frameTexcoordPos = varTexCoord0;\n\n // Fetch the z under the pixel (stereo or not)\n float Zeye = getZEye(framePixelPos);\n if (Zeye <= -getPosLinearDepthFar()) {\n outFragColor = vec4(1.0, 0.0, 0.0, 0.0);\n return;\n }\n\n float nearPlaneScale = 0.5 * getProjectionNear();\n\n vec3 worldNormal = getWorldNormal(frameTexcoordPos);\n\n // The position of the pixel fragment in Eye space then in world space\n vec3 eyePos = evalEyePositionFromZeye(stereoSide.x, Zeye, texcoordPos);\n // vec3 worldPos = (frameTransform._viewInverse * vec4(eyePos, 1.0)).xyz;\n\n /* if (texcoordPos.y > 0.5) {\n outFragColor = vec4(fract(10.0 * worldPos.xyz), 1.0);\n } else {\n outFragColor = vec4(fract(10.0 * eyePos.xyz), 1.0);\n }*/\n // return;\n\n // Calculate the perspective scale.\n // Clamp to 0.5\n // float perspectiveScale = max(0.5, (-getProjScaleEye() / Zeye));\n float perspectiveScale = max(0.5, (-getCurvatureBasisScale() * getProjectionNear() / Zeye));\n\n // Calculate dF/du and dF/dv\n vec2 viewportScale = perspectiveScale * getInvWidthHeight();\n vec2 du = vec2( viewportScale.x * (float(stereoSide.w) > 0.0 ? 0.5 : 1.0), 0.0f );\n vec2 dv = vec2( 0.0f, viewportScale.y );\n\n vec4 dFdu = vec4(getWorldNormalDiff(frameTexcoordPos, du), getEyeDepthDiff(frameTexcoordPos, du));\n vec4 dFdv = vec4(getWorldNormalDiff(frameTexcoordPos, dv), getEyeDepthDiff(frameTexcoordPos, dv));\n\n float threshold = getCurvatureDepthThreshold();\n dFdu *= step(abs(dFdu.w), threshold);\n dFdv *= step(abs(dFdv.w), threshold); \n\n // Calculate ( du/dx, du/dy, du/dz ) and ( dv/dx, dv/dy, dv/dz )\n // Eval px, py, pz world positions of the basis centered on the world pos of the fragment\n float axeLength = nearPlaneScale;\n\n vec3 ax = (frameTransform._view[0].xyz * axeLength);\n vec3 ay = (frameTransform._view[1].xyz * axeLength);\n\n\n vec3 az = (frameTransform._view[2].xyz * axeLength);\n\n vec4 px = vec4(eyePos + ax, 0.0);\n vec4 py = vec4(eyePos + ay, 0.0);\n vec4 pz = vec4(eyePos + az, 0.0);\n\n\n /* if (texcoordPos.y > 0.5) {\n outFragColor = vec4(fract(px.xyz), 1.0);\n } else {\n outFragColor = vec4(fract(eyePos.xyz), 1.0);\n }*/\n // return;\n\n\n /* IN case the axis end point goes behind mid way near plane, this shouldn't happen\n if (px.z >= -nearPlaneScale) {\n outFragColor = vec4(1.0, 0.0, 0.0, 1.0);\n return;\n } else if (py.z >= -nearPlaneScale) {\n outFragColor = vec4(0.0, 1.0, 0.0, 1.0);\n return;\n } else if (pz.z >= -nearPlaneScale) {\n outFragColor = vec4(0.0, 0.0, 1.0, 1.0);\n return;\n }*/\n \n\n // Project px, py pz to homogeneous clip space\n // mat4 viewProj = getProjection(stereoSide.x);\n mat4 viewProj = getProjectionMono();\n px = viewProj * px;\n py = viewProj * py;\n pz = viewProj * pz;\n\n\n // then to normalized clip space\n px.xy /= px.w;\n py.xy /= py.w;\n pz.xy /= pz.w;\n\n vec2 nclipPos = (texcoordPos - 0.5) * 2.0;\n\n\n //vec4 clipPos = frameTransform._projection[stereoSide.x] * vec4(eyePos, 1.0);\n vec4 clipPos = getProjectionMono() * vec4(eyePos, 1.0);\n nclipPos = clipPos.xy / clipPos.w;\n\n /* if (texcoordPos.y > 0.5) {\n // outFragColor = vec4(fract(10.0 * worldPos.xyz), 1.0);\n outFragColor = vec4(fract(10.0 * (nclipPos)), 0.0, 1.0);\n\n } else {\n outFragColor = vec4(fract(10.0 * (clipPos.xy / clipPos.w)), 0.0, 1.0);\n // outFragColor = vec4(nclipPos * 0.5 + 0.5, 0.0, 1.0);\n }*/\n //return;\n\n\n float 
pixPerspectiveScaleInv = 1.0 / (perspectiveScale);\n px.xy = (px.xy - nclipPos) * pixPerspectiveScaleInv;\n py.xy = (py.xy - nclipPos) * pixPerspectiveScaleInv;\n pz.xy = (pz.xy - nclipPos) * pixPerspectiveScaleInv;\n \n /* if (texcoordPos.y > 0.5) {\n // outFragColor = vec4(fract(10.0 * worldPos.xyz), 1.0);\n outFragColor = vec4(fract(10.0 * (px.xy)), 0.0, 1.0);\n\n } else {\n outFragColor = vec4(fract(10.0 * (py.xy)), 0.0, 1.0);\n // outFragColor = vec4(nclipPos * 0.5 + 0.5, 0.0, 1.0);\n }*/\n // return;\n\n // Calculate dF/dx, dF/dy and dF/dz using chain rule\n vec4 dFdx = dFdu * px.x + dFdv * px.y;\n vec4 dFdy = dFdu * py.x + dFdv * py.y;\n vec4 dFdz = dFdu * pz.x + dFdv * pz.y;\n\n vec3 trace = vec3(dFdx.x, dFdy.y, dFdz.z);\n\n /*if (dot(trace, trace) > params.curvatureInfo.w) {\n outFragColor = vec4(dFdx.x, dFdy.y, dFdz.z, 1.0);\n return;\n }*/\n\n // Calculate the mean curvature\n float meanCurvature = ((trace.x + trace.y + trace.z) * 0.33333333333333333) * params.curvatureInfo.w;\n\n outFragColor = vec4(vec3(worldNormal + 1.0) * 0.5, (meanCurvature + 1.0) * 0.5);\n}\n\n\n"
+ },
+ "iSWjcjtbOXRq+yE3M7ZmLw==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_normal_map_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid 
skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _tangentWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n skinPositionNormalTangent(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, inTangent.xyz, position, interpolatedNormal.xyz, interpolatedTangent.xyz);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n interpolatedNormal = vec4(normalize(interpolatedNormal.xyz), 0.0);\n interpolatedTangent = vec4(normalize(interpolatedTangent.xyz), 0.0);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n\n\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedNormal.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 
mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedTangent.xyz = vec3(dot(mr0, interpolatedTangent.xyz), dot(mr1, interpolatedTangent.xyz), dot(mr2, interpolatedTangent.xyz));\n }\n\n\n _normalWS = interpolatedNormal.xyz;\n _tangentWS = interpolatedTangent.xyz;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// model_translucent_normal_map_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 23/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * 
lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool 
light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n\n\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat 
isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident 
angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return 
specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in 
world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\nuniform lightBuffer {\n Light lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = 
vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return 
frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? 
-eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? 
(element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n\n\n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float 
radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 
_textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n\n\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n 
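    // hash2D above folds the 3D sample position into a 2D texture coordinate, so the 2D fadeMaskMap
    // can stand in for a 3D noise volume when evalFadeNoiseGradient tri-linearly interpolates it below.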
return pow(n, 1.0/2.2); // Remove sRGB. Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec3 _normalWS;\nin vec3 _tangentWS;\nin vec3 _color;\nin float _alpha;\nin vec4 _positionWS;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n 
fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n int matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTex = (((matKey & NORMAL_MAP_BIT) != 0) ? fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragPositionES = _positionES.xyz;\n vec3 fragPositionWS = _positionWS.xyz;\n // Lighting is done in world space\n vec3 fragNormalWS;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTex, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormalWS = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n TransformCamera cam = getTransformCamera();\n vec3 fragToEyeWS = cam._viewInverse[3].xyz - fragPositionWS;\n vec3 fragToEyeDirWS = normalize(fragToEyeWS);\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragToEyeDirWS);\n\n vec4 localLighting = vec4(0.0);\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(_positionWS);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (hasLocalLights(numLights, clusterPos, dims)) {\n localLighting = evalLocalLighting(cluster, numLights, fragPositionWS, surfaceWS,\n metallic, fresnel, albedo, 0.0,\n vec4(0), vec4(0), opacity);\n }\n\n _fragColor = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPositionES,\n\t\tfragPositionWS,\n albedo,\n fresnel,\n metallic,\n emissive + fadeEmissive,\n surfaceWS, opacity, localLighting.rgb),\n 
opacity);\n}\n\n\n"
+ },
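The entry above bakes in the fade/dissolve helpers (`evalFadeNoiseGradient`, `evalFadeBaseGradient`, `applyFade`) used by this fragment shader. As a sanity check on the noise part, here is a minimal CPU-side sketch of the same trilinear blend: the fractional position is remapped with `f*f*(3-2f)`, eight lattice samples are mixed along z, then x, then y, and the result is re-centered on 0 and scaled by the category's noise level. This is not repository code; the hash-based lattice is a hypothetical stand-in for sampling the `fadeMaskMap` texture.

```cpp
// CPU-side sketch of the trilinear noise blend from evalFadeNoiseGradient above.
// The lattice hash below is an assumption standing in for the fadeMaskMap texture;
// only the interpolation math mirrors the shader.
#include <cmath>
#include <cstdio>

struct Vec3 { float x, y, z; };

// Hypothetical stand-in for fetching the noise texture at an integer lattice corner.
static float latticeNoise(int x, int y, int z) {
    unsigned h = unsigned(x) * 73856093u ^ unsigned(y) * 19349663u ^ unsigned(z) * 83492791u;
    h ^= h >> 13; h *= 0x5bd1e995u; h ^= h >> 15;
    return (h & 0xFFFFFFu) / float(0xFFFFFF);              // value in [0, 1]
}

static float mixf(float a, float b, float t) { return a + (b - a) * t; }

// Smooth the fractional part, fetch the 8 corners, interpolate along z, x, then y,
// re-center on 0 and scale by the per-category noise level, as the shader does.
static float fadeNoiseGradient(Vec3 p, float noiseLevel) {
    int ix = int(std::floor(p.x)), iy = int(std::floor(p.y)), iz = int(std::floor(p.z));
    Vec3 f = { p.x - ix, p.y - iy, p.z - iz };
    f.x = f.x * f.x * (3.0f - 2.0f * f.x);
    f.y = f.y * f.y * (3.0f - 2.0f * f.y);
    f.z = f.z * f.z * (3.0f - 2.0f * f.z);

    float c000 = latticeNoise(ix,     iy,     iz);
    float c010 = latticeNoise(ix,     iy + 1, iz);
    float c100 = latticeNoise(ix + 1, iy,     iz);
    float c110 = latticeNoise(ix + 1, iy + 1, iz);
    float c001 = latticeNoise(ix,     iy,     iz + 1);
    float c011 = latticeNoise(ix,     iy + 1, iz + 1);
    float c101 = latticeNoise(ix + 1, iy,     iz + 1);
    float c111 = latticeNoise(ix + 1, iy + 1, iz + 1);

    float x00 = mixf(c000, c001, f.z), x01 = mixf(c010, c011, f.z);   // along z
    float x10 = mixf(c100, c101, f.z), x11 = mixf(c110, c111, f.z);
    float y0  = mixf(x00, x10, f.x),   y1  = mixf(x01, x11, f.x);     // along x

    float noise = mixf(y0, y1, f.y) - 0.5f;                           // along y, centered on 0
    return noise * noiseLevel;
}

int main() {
    Vec3 p = { 1.25f, 3.5f, 0.75f };
    std::printf("fade noise gradient: %f\n", fadeNoiseGradient(p, 1.0f));
    return 0;
}
```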
+ "iVdwQ2dEvB1gkV0p4/adjA==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// overlay3D.vert\n// vertex shader\n//\n// Created by Sam Gateau on 6/16/15.\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nout vec3 _color;\nout float _alpha;\nout vec2 _texCoord0;\nout vec4 _positionES;\nout vec3 _normalWS;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n _texCoord0 = inTexCoord0.st;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, 
inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// overlay3D_translucent_unlit.frag\n// fragment shader\n//\n// Created by Zach Pomerantz on 2/2/2016.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nuniform sampler2D originalTexture;\n\nin vec2 _texCoord0;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec4 albedo = texture(originalTexture, _texCoord0);\n\n _fragColor = vec4(albedo.rgb * _color, albedo.a * _alpha);\n}\n\n\n"
+ },
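The overlay3D entry that ends here carries the shared colour helpers (`color_scalar_sRGBToLinear`, `color_LinearToYCoCg`, `color_YCoCgToUnclampedLinear`). A small CPU-side sketch, under the assumption that a plain C++ restatement is enough to check the numbers: the sRGB decode is linear below the 0.04045 elbow and a 2.4 gamma above it, and the YCoCg pair should round-trip an RGB colour exactly (up to float error). This is illustrative only, not code from the repository.

```cpp
// CPU-side check of the sRGB decode and YCoCg round trip from the entry above.
// Same piecewise curve and same matrices as the shader helpers; not repository code.
#include <cmath>
#include <cstdio>

struct Vec3 { float x, y, z; };

static float srgbToLinear(float v) {
    const float SRGB_ELBOW = 0.04045f;
    return (v <= SRGB_ELBOW) ? v / 12.92f
                             : std::pow((v + 0.055f) / 1.055f, 2.4f);
}

// Y = R/4 + G/2 + B/4, Co = R/2 - B/2, Cg = -R/4 + G/2 - B/4
static Vec3 linearToYCoCg(Vec3 rgb) {
    return { rgb.x / 4.0f + rgb.y / 2.0f + rgb.z / 4.0f,
             rgb.x / 2.0f - rgb.z / 2.0f,
            -rgb.x / 4.0f + rgb.y / 2.0f - rgb.z / 4.0f };
}

// R = Y + Co - Cg, G = Y + Cg, B = Y - Co - Cg (unclamped inverse)
static Vec3 yCoCgToLinear(Vec3 c) {
    return { c.x + c.y - c.z, c.x + c.z, c.x - c.y - c.z };
}

int main() {
    std::printf("sRGB 0.5 -> linear %f\n", srgbToLinear(0.5f));      // ~0.214
    Vec3 rgb  = { 0.25f, 0.5f, 0.75f };
    Vec3 back = yCoCgToLinear(linearToYCoCg(rgb));
    std::printf("YCoCg round trip: %f %f %f\n", back.x, back.y, back.z); // ~0.25 0.5 0.75
    return 0;
}
```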
+ "jWfLQBP3hY4gUntsrqpSdw==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_normal_map_fade_dq.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/29/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\nmat4 dualQuatToMat4(vec4 real, vec4 dual) {\n float twoRealXSq = 2.0 * real.x * real.x;\n float twoRealYSq = 2.0 * real.y * real.y;\n float twoRealZSq = 2.0 * real.z * real.z;\n float twoRealXY = 2.0 * real.x * real.y;\n float twoRealXZ = 2.0 * real.x * real.z;\n float twoRealXW = 2.0 * real.x * real.w;\n float twoRealZW = 2.0 * real.z * real.w;\n float twoRealYZ = 2.0 * real.y * real.z;\n float twoRealYW = 2.0 * real.y * real.w;\n vec4 col0 = vec4(1.0 - twoRealYSq - twoRealZSq,\n twoRealXY + twoRealZW,\n twoRealXZ - twoRealYW,\n 0.0);\n vec4 col1 = vec4(twoRealXY - twoRealZW,\n 1.0 - twoRealXSq - twoRealZSq,\n twoRealYZ + twoRealXW,\n 0.0);\n vec4 col2 = vec4(twoRealXZ + twoRealYW,\n twoRealYZ - twoRealXW,\n 1.0 - twoRealXSq - twoRealYSq,\n 0.0);\n vec4 col3 = vec4(2.0 * (-dual.w * real.x + dual.x * real.w - dual.y * real.z + dual.z * real.y),\n 2.0 * (-dual.w * real.y + dual.x * real.z + dual.y * real.w - dual.z * real.x),\n 2.0 * (-dual.w * real.z - dual.x * real.y + dual.y * real.x + dual.z * real.w),\n 1.0);\n\n return mat4(col0, col1, col2, col3);\n}\n\n// dual 
quaternion linear blending\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * 
vec4(inNormal, 0));\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n\n // linearly blend scale and dual quaternion components\n vec4 sAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 rAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 dAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 cAccum = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 polarityReference = clusterMatrices[skinClusterIndex[0]][1];\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n\n vec4 scale = clusterMatrix[0];\n vec4 real = clusterMatrix[1];\n vec4 dual = clusterMatrix[2];\n vec4 cauterizedPos = clusterMatrix[3];\n\n // to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.\n float dqClusterWeight = clusterWeight;\n if (dot(real, polarityReference) < 0.0) {\n dqClusterWeight = -clusterWeight;\n }\n\n sAccum += scale * clusterWeight;\n rAccum += real * dqClusterWeight;\n dAccum += dual * dqClusterWeight;\n cAccum += cauterizedPos * clusterWeight;\n }\n\n // normalize dual quaternion\n float norm = length(rAccum);\n rAccum /= norm;\n dAccum /= norm;\n\n // conversion from dual quaternion to 4x4 matrix.\n mat4 m = dualQuatToMat4(rAccum, dAccum);\n\n // sAccum.w indicates the amount of cauterization for this vertex.\n // 0 indicates no cauterization and 1 indicates full cauterization.\n // TODO: make this cauterization smoother or implement full dual-quaternion scale support.\n const float CAUTERIZATION_THRESHOLD = 0.1;\n if (sAccum.w > CAUTERIZATION_THRESHOLD) {\n skinnedPosition = cAccum;\n } else {\n sAccum.w = 1.0;\n skinnedPosition = m * (sAccum * inPosition);\n }\n\n skinnedNormal = vec3(m * vec4(inNormal, 0));\n skinnedTangent = vec3(m * vec4(inTangent, 0));\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _tangentWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n skinPositionNormalTangent(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, inTangent.xyz, position, interpolatedNormal.xyz, interpolatedTangent.xyz);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n interpolatedNormal = vec4(normalize(interpolatedNormal.xyz), 0.0);\n interpolatedTangent = vec4(normalize(interpolatedTangent.xyz), 0.0);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= 
cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedNormal.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedTangent.xyz = vec3(dot(mr0, interpolatedTangent.xyz), dot(mr1, interpolatedTangent.xyz), dot(mr2, interpolatedTangent.xyz));\n }\n\n\n _normalWS = interpolatedNormal.xyz;\n _tangentWS = interpolatedTangent.xyz;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_translucent_normal_map.frag\n// fragment shader\n//\n// Created by Olivier Prat on 23/01/2018.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 
0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat 
lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 
_ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n\n\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return 
vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the 
accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is 
not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we 
require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 
ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\nuniform lightBuffer {\n Light lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float 
fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / 
planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return 
frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? -eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? 
frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? (element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n\n\n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = 
lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef 
GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // 
unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec4 _positionWS;\nin vec3 _normalWS;\nin vec3 _tangentWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n Material mat = getMaterial();\n int matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\n\n\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTex = (((matKey & NORMAL_MAP_BIT) != 0) ? fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? 
emissiveTex : emissive);\n}\n;\n\n vec3 fragPositionES = _positionES.xyz;\n vec3 fragPositionWS = _positionWS.xyz;\n // Lighting is done in world space\n vec3 fragNormalWS;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTex, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormalWS = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n TransformCamera cam = getTransformCamera();\n vec3 fragToEyeWS = cam._viewInverse[3].xyz - fragPositionWS;\n vec3 fragToEyeDirWS = normalize(fragToEyeWS);\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragToEyeDirWS);\n\n vec4 localLighting = vec4(0.0);\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(_positionWS);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (hasLocalLights(numLights, clusterPos, dims)) {\n localLighting = evalLocalLighting(cluster, numLights, fragPositionWS, surfaceWS,\n metallic, fresnel, albedo, 0.0,\n vec4(0), vec4(0), opacity);\n }\n\n _fragColor = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPositionES,\n\t\tfragPositionWS,\n albedo,\n fresnel,\n metallic,\n emissive,\n surfaceWS, opacity, localLighting.rgb),\n opacity);\n}\n\n\n"
+ },
+ "kVm0Zs3mBDfls8rIqIm6AQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_normal_map_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid 
skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _tangentWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 interpolatedTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n skinPositionNormalTangent(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, inTangent.xyz, position, interpolatedNormal.xyz, interpolatedTangent.xyz);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n interpolatedNormal = vec4(normalize(interpolatedNormal.xyz), 0.0);\n interpolatedTangent = vec4(normalize(interpolatedTangent.xyz), 0.0);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n\n\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedNormal.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 
mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n interpolatedTangent.xyz = vec3(dot(mr0, interpolatedTangent.xyz), dot(mr1, interpolatedTangent.xyz), dot(mr2, interpolatedTangent.xyz));\n }\n\n\n _normalWS = interpolatedNormal.xyz;\n _tangentWS = interpolatedTangent.xyz;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_normal_map_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return 
mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, 
vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 0) in vec4 _positionES;\nlayout(location = 1) in vec4 _positionWS;\nlayout(location = 2) in vec2 _texCoord0;\nlayout(location = 3) in vec2 _texCoord1;\nlayout(location = 4) in vec3 _normalWS;\nlayout(location = 5) in vec3 _tangentWS;\nlayout(location = 6) in vec3 _color;\n\nvoid main(void) {\n vec3 fadeEmissive;\n 
FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTex = (((matKey & NORMAL_MAP_BIT) != 0) ? fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragNormalWS;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTex, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormalWS = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n\n packDeferredFragment(\n normalize(fragNormalWS.xyz),\n opacity,\n albedo,\n roughness,\n metallic,\n emissive + fadeEmissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "kdIMKfBQhKmG8XQB+bWcRw==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawTransformUnitQuad.vert\n// \n// Draw and transform the unit quad [-1,-1 -> 1,1]\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = 
texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, 0.0, 1.0),\n vec4(1.0, -1.0, 0.0, 1.0),\n vec4(-1.0, 1.0, 0.0, 1.0),\n vec4(1.0, 1.0, 0.0, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * pos);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawTexture.frag\n//\n// Draw texture 0 fetched at texcoord.xy\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\nuniform sampler2D colorMap;\n\nlayout(location = 0) in vec2 varTexCoord0;\nlayout(location = 0) out vec4 outFragColor;\n\nvoid main(void) {\n outFragColor = texture(colorMap, varTexCoord0);\n}\n\n\n"
+ },
+ "kw1zhSuYtq0S+c7hnfMD5Q==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// simple.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 9/15/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\n// the interpolated normal\nout vec3 _normalWS;\nout vec3 _normalMS;\nout vec4 _color;\nout vec2 _texCoord0;\nout vec4 _positionMS;\nout vec4 _positionES;\n\nvoid main(void) {\n _color = color_sRGBAToLinear(inColor);\n _texCoord0 = inTexCoord0.st;\n _positionMS = inPosition;\n _normalMS = inNormal.xyz;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = 
obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// simple_transparent_textured.frag\n// fragment shader\n//\n// Created by Sam Gateau on 4/3/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return 
mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, 
roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:09 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// the albedo texture\nuniform sampler2D originalTexture;\n\n// the interpolated normal\nin vec3 _normalWS;\nin vec4 _color;\nin vec2 _texCoord0;\n\nvoid main(void) {\n vec4 texel = texture(originalTexture, _texCoord0);\n float colorAlpha = _color.a * texel.a;\n\n packDeferredFragmentTranslucent(\n normalize(_normalWS),\n colorAlpha,\n _color.rgb * texel.rgb,\n DEFAULT_FRESNEL,\n DEFAULT_ROUGHNESS);\n}\n\n"
+ },
+ "kzz1QfdnZMN4le5dkFTrVw==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/045/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid 
skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nout vec4 _positionES;\nout vec2 _texCoord0;\nout vec2 _texCoord1;\nout vec3 _normalWS;\nout vec3 _color;\nout float _alpha;\nout vec4 _positionWS;\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n vec3 interpolatedNormal = vec3(0.0, 0.0, 0.0);\n\n skinPositionNormal(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, position, interpolatedNormal);\n\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n\n\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * position);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS.xyz = vec3(dot(mr0, interpolatedNormal.xyz), dot(mr1, interpolatedNormal.xyz), dot(mr2, interpolatedNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or 
http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / 
occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n 
surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 1) in vec4 _positionWS;\nlayout(location = 2) in vec2 _texCoord0;\nlayout(location = 3) in vec2 _texCoord1;\nlayout(location = 4) in vec3 _normalWS;\nlayout(location = 5) in vec3 _color;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = 
fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n {\n if (opacity < 1.0) {\n discard;\n }\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n\n\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n\n packDeferredFragment(\n normalize(_normalWS), \n opacity,\n albedo,\n roughness,\n metallic,\n emissive+fadeEmissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "moNEFfK86uWpWgQ6q6nsxA==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// model_normal_map_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 04/24/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec4 _positionWS;\nlayout(location = 2) out vec2 _texCoord0;\nlayout(location = 3) out vec2 _texCoord1;\nlayout(location = 4) out vec3 _normalWS;\nlayout(location = 5) out vec3 _tangentWS;\nlayout(location = 6) out vec3 _color;\nlayout(location = 7) out float _alpha;\n\nvoid main(void) {\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n 
eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _tangentWS = vec3(dot(mr0, inTangent.xyz), dot(mr1, inTangent.xyz), dot(mr2, inTangent.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_normal_map_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? 
((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 
0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = 
dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 0) in vec4 _positionES;\nlayout(location = 1) in vec4 _positionWS;\nlayout(location = 2) in vec2 _texCoord0;\nlayout(location = 3) in vec2 _texCoord1;\nlayout(location = 4) in vec3 _normalWS;\nlayout(location = 5) in vec3 _tangentWS;\nlayout(location = 6) in vec3 _color;\n\nvoid main(void) {\n vec3 fadeEmissive;\n 
FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTex = (((matKey & NORMAL_MAP_BIT) != 0) ? fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragNormalWS;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTex, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormalWS = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n\n packDeferredFragment(\n normalize(fragNormalWS.xyz),\n opacity,\n albedo,\n roughness,\n metallic,\n emissive + fadeEmissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "mtYx5P+wGdOJ+2MGHppNNw==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// skin_model_shadow.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer 
{\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_CLUSTERS = 128;\nconst int INDICES_PER_VERTEX = 4;\n\n// func declareUseDualQuaternionSkinning(USE_DUAL_QUATERNION_SKINNING)\n\n// if not SKINNING_SLH\nlayout(std140) uniform skinClusterBuffer {\n mat4 clusterMatrices[MAX_CLUSTERS];\n};\n\n// USE_DUAL_QUATERNION_SKINNING\n\nvoid skinPosition(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, out vec4 skinnedPosition) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n}\n\nvoid skinPositionNormal(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal,\n out vec4 skinnedPosition, out vec3 skinnedNormal) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n}\n\nvoid skinPositionNormalTangent(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPosition, vec3 inNormal, vec3 inTangent,\n out vec4 skinnedPosition, out vec3 skinnedNormal, out vec3 skinnedTangent) {\n vec4 newPosition = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newNormal = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 newTangent = vec4(0.0, 0.0, 0.0, 0.0);\n\n for (int i = 0; i < INDICES_PER_VERTEX; i++) {\n mat4 clusterMatrix = clusterMatrices[(skinClusterIndex[i])];\n float clusterWeight = skinClusterWeight[i];\n newPosition += clusterMatrix * inPosition * clusterWeight;\n newNormal += clusterMatrix * vec4(inNormal.xyz, 0.0) * clusterWeight;\n newTangent += clusterMatrix * vec4(inTangent.xyz, 0.0) * clusterWeight;\n }\n\n skinnedPosition = newPosition;\n skinnedNormal = newNormal.xyz;\n skinnedTangent = newTangent.xyz;\n}\n\n// if USE_DUAL_QUATERNION_SKINNING\n\n\n\nvoid main(void) {\n vec4 position = vec4(0.0, 0.0, 0.0, 0.0);\n skinPosition(inSkinClusterIndex, inSkinClusterWeight, inPosition, position);\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToClipPos\n { // transformModelToMonoClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n 
_mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * position);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n }\n\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// skin_model_shadow.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 3/24/14.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) out vec4 _fragColor;\n\nvoid main(void) {\n // pass-through to set z-buffer\n _fragColor = vec4(1.0, 1.0, 1.0, 0.0);\n}\n\n\n"
+ },
+ "mwO+fp+K7wX+h3KlIzUQEw==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 04/24/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec4 _positionWS;\nlayout(location = 2) out vec2 _texCoord0;\nlayout(location = 3) out vec2 _texCoord1;\nlayout(location = 4) out vec3 _normalWS;\nlayout(location = 5) out vec3 _color;\nlayout(location = 6) out float _alpha;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = 
vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n// model_translucent_fade.frag\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n// glsl / C++ compatible source as interface for Light\n#ifndef LightVolume_Shared_slh\n#define LightVolume_Shared_slh\n\n// Light.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightVolumeConstRef LightVolume\n\nstruct LightVolume {\n vec4 positionRadius;\n vec4 
directionSpotCos;\n};\n\nbool lightVolume_isPoint(LightVolume lv) { return bool(lv.directionSpotCos.w < 0.f); }\nbool lightVolume_isSpot(LightVolume lv) { return bool(lv.directionSpotCos.w >= 0.f); }\n\nvec3 lightVolume_getPosition(LightVolume lv) { return lv.positionRadius.xyz; }\nfloat lightVolume_getRadius(LightVolume lv) { return lv.positionRadius.w; }\nfloat lightVolume_getRadiusSquare(LightVolume lv) { return lv.positionRadius.w * lv.positionRadius.w; }\nvec3 lightVolume_getDirection(LightVolume lv) { return lv.directionSpotCos.xyz; } // direction is -Z axis\n\nfloat lightVolume_getSpotAngleCos(LightVolume lv) { return lv.directionSpotCos.w; }\nvec2 lightVolume_getSpotOutsideNormal2(LightVolume lv) { return vec2(-sqrt(1.0 - lv.directionSpotCos.w * lv.directionSpotCos.w), lv.directionSpotCos.w); }\n\n\nbool lightVolume_clipFragToLightVolumePoint(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2) {\n fragLightVecLen2.xyz = lightVolume_getPosition(lv) - fragPos.xyz;\n fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);\n\n // Kill if too far from the light center\n return (fragLightVecLen2.w <= lightVolume_getRadiusSquare(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpotSide(LightVolume lv, vec4 fragLightDirLen, out float cosSpotAngle) {\n // Kill if not in the spot light (ah ah !)\n cosSpotAngle = max(-dot(fragLightDirLen.xyz, lightVolume_getDirection(lv)), 0.0);\n return (cosSpotAngle >= lightVolume_getSpotAngleCos(lv));\n}\n\nbool lightVolume_clipFragToLightVolumeSpot(LightVolume lv, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {\n if (!lightVolume_clipFragToLightVolumePoint(lv, fragPos, fragLightVecLen2)) {\n return false;\n }\n\n // Allright we re valid in the volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n\n return lightVolume_clipFragToLightVolumeSpotSide(lv, fragLightDirLen, cosSpotAngle);\n}\n\n#endif\n\n\n// // glsl / C++ compatible source as interface for Light\n#ifndef LightIrradiance_Shared_slh\n#define LightIrradiance_Shared_slh\n\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define LightIrradianceConstRef LightIrradiance\n\nstruct LightIrradiance {\n vec4 colorIntensity;\n // falloffRadius, cutoffRadius, falloffSpot, spare\n vec4 attenuation;\n};\n\n\nvec3 lightIrradiance_getColor(LightIrradiance li) { return li.colorIntensity.xyz; }\nfloat lightIrradiance_getIntensity(LightIrradiance li) { return li.colorIntensity.w; }\nvec3 lightIrradiance_getIrradiance(LightIrradiance li) { return li.colorIntensity.xyz * li.colorIntensity.w; }\nfloat lightIrradiance_getFalloffRadius(LightIrradiance li) { return li.attenuation.x; }\nfloat lightIrradiance_getCutoffRadius(LightIrradiance li) { return li.attenuation.y; }\nfloat lightIrradiance_getFalloffSpot(LightIrradiance li) { return li.attenuation.z; }\n\n\n// Light is the light source its self, d is the light's distance calculated as length(unnormalized light vector).\nfloat lightIrradiance_evalLightAttenuation(LightIrradiance li, float d) {\n float radius = lightIrradiance_getFalloffRadius(li);\n float cutoff = lightIrradiance_getCutoffRadius(li);\n float denom = (d / radius) + 1.0;\n float attenuation = 1.0 / (denom * denom);\n\n // \"Fade\" the edges of light sources to make things look a 
bit more attractive.\n // Note: this tends to look a bit odd at lower exponents.\n attenuation *= min(1.0, max(0.0, -(d - cutoff)));\n\n return attenuation;\n}\n\n\nfloat lightIrradiance_evalLightSpotAttenuation(LightIrradiance li, float cosA) {\n return pow(cosA, lightIrradiance_getFalloffSpot(li));\n}\n\n\n#endif\n\n\n// // NOw lets define Light\nstruct Light {\n LightVolume volume;\n LightIrradiance irradiance;\n};\n\nbool light_isSpot(Light l) { return lightVolume_isSpot(l.volume); }\n\nvec3 getLightPosition(Light l) { return lightVolume_getPosition(l.volume); }\nvec3 getLightDirection(Light l) { return lightVolume_getDirection(l.volume); }\n\nvec3 getLightColor(Light l) { return lightIrradiance_getColor(l.irradiance); }\nfloat getLightIntensity(Light l) { return lightIrradiance_getIntensity(l.irradiance); }\nvec3 getLightIrradiance(Light l) { return lightIrradiance_getIrradiance(l.irradiance); }\n\n// Ambient lighting needs extra info provided from a different Buffer\n// glsl / C++ compatible source as interface for Light\n#ifndef SphericalHarmonics_Shared_slh\n#define SphericalHarmonics_Shared_slh\n\n// SphericalHarmonics.shared.slh\n// libraries/graphics/src/graphics\n//\n// Created by Sam Gateau on 14/9/2016.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n#define SphericalHarmonicsConstRef SphericalHarmonics\n\nstruct SphericalHarmonics {\n vec4 L00;\n vec4 L1m1;\n vec4 L10;\n vec4 L11;\n vec4 L2m2;\n vec4 L2m1;\n vec4 L20;\n vec4 L21;\n vec4 L22;\n};\n\nvec4 sphericalHarmonics_evalSphericalLight(SphericalHarmonicsConstRef sh, vec3 direction) {\n\n vec3 dir = direction.xyz;\n\n const float C1 = 0.429043;\n const float C2 = 0.511664;\n const float C3 = 0.743125;\n const float C4 = 0.886227;\n const float C5 = 0.247708;\n\n vec4 value = C1 * sh.L22 * (dir.x * dir.x - dir.y * dir.y) +\n C3 * sh.L20 * dir.z * dir.z +\n C4 * sh.L00 - C5 * sh.L20 +\n 2.0 * C1 * (sh.L2m2 * dir.x * dir.y +\n sh.L21 * dir.x * dir.z +\n sh.L2m1 * dir.y * dir.z) +\n 2.0 * C2 * (sh.L11 * dir.x +\n sh.L1m1 * dir.y +\n sh.L10 * dir.z);\n return value;\n}\n\n#endif\n\n\n// End C++ compatible// Light Ambient\n\nstruct LightAmbient {\n vec4 _ambient;\n SphericalHarmonics _ambientSphere;\n mat4 transform;\n};\n\nSphericalHarmonics getLightAmbientSphere(LightAmbient l) { return l._ambientSphere; }\n\n\nfloat getLightAmbientIntensity(LightAmbient l) { return l._ambient.x; }\nbool getLightHasAmbientMap(LightAmbient l) { return l._ambient.y > 0.0; }\nfloat getLightAmbientMapNumMips(LightAmbient l) { return l._ambient.y; }\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n\n\n 
return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float 
smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nuniform keyLightBuffer {\n Light light;\n};\nLight getKeyLight() {\n return light;\n}\n\n\nuniform lightAmbientBuffer {\n LightAmbient lightAmbient;\n};\n\nLightAmbient getLightAmbient() {\n return lightAmbient;\n}\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nconst int HAZE_MODE_IS_ACTIVE = 1 << 0;\nconst int HAZE_MODE_IS_ALTITUDE_BASED = 1 << 1;\nconst int HAZE_MODE_IS_KEYLIGHT_ATTENUATED = 1 << 2;\nconst int HAZE_MODE_IS_MODULATE_COLOR = 1 << 3;\nconst int HAZE_MODE_IS_ENABLE_LIGHT_BLEND = 1 << 4;\n\nstruct HazeParams {\n vec3 hazeColor;\n float hazeGlareBlend;\n\n vec3 hazeGlareColor;\n float hazeBaseReference;\n\n vec3 colorModulationFactor;\n int hazeMode;\n\n mat4 transform;\n float backgroundBlend;\n\n float hazeRangeFactor;\n float hazeHeightFactor;\n\n float hazeKeyLightRangeFactor;\n float hazeKeyLightAltitudeFactor;\n};\n\nlayout(std140) uniform hazeBuffer {\n HazeParams hazeParams;\n};\n\n\n// Input:\n// color - fragment original color\n// lightDirectionWS - parameters of the keylight\n// fragPositionWS - 
fragment position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3 fragPositionWS) {\n // Directional light attenuation is simulated by assuming the light source is at a fixed height above the\n // fragment. This height is where the haze density is reduced by 95% from the haze at the fragment's height\n //\n // The distance is computed from the height and the directional light orientation\n // The distance is limited to height * 1,000, which gives an angle of ~0.057 degrees\n\n // Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)\n float height_95p = 2000.0;\n const float log_p_005 = log(0.05);\n if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {\n height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;\n }\n\n // Note that we need the sine to be positive\n float sin_pitch = abs(lightDirectionWS.y);\n \n float distance;\n const float minimumSinPitch = 0.001;\n if (sin_pitch < minimumSinPitch) {\n distance = height_95p / minimumSinPitch;\n } else {\n distance = height_95p / sin_pitch;\n }\n\n // Integration is from the fragment towards the light source\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeKeyLightRangeFactor * \n exp(-hazeParams.hazeKeyLightAltitudeFactor * (fragPositionWS.y - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n // Note that t is constant and equal to -log(0.05)\n // float t = hazeParams.hazeAltitudeFactor * height_95p;\n // hazeIntegral *= (1.0 - exp (-t)) / t;\n hazeIntegral *= 0.3171178;\n\n return color * exp(-hazeIntegral);\n}\n\n// Input:\n// fragColor - fragment original color\n// fragPositionES - fragment position in eye coordinates\n// fragPositionWS - fragment position in world coordinates\n// eyePositionWS - eye position in world coordinates\n// Output:\n// fragment colour after haze effect\n//\n// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission\n//\nvec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {\n // Distance to fragment \n float distance = length(fragPositionES);\n float eyeWorldHeight = eyePositionWS.y;\n\n // Convert haze colour from uniform into a vec4\n vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);\n\n // Use the haze colour for the glare colour, if blend is not enabled\n vec4 blendedHazeColor;\n if ((hazeParams.hazeMode & HAZE_MODE_IS_ENABLE_LIGHT_BLEND) == HAZE_MODE_IS_ENABLE_LIGHT_BLEND) {\n // Directional light component is a function of the angle from the eye, between the fragment and the sun\n vec3 fragToEyeDirWS = normalize(fragPositionWS - eyePositionWS);\n\n float glareComponent = max(0.0, dot(fragToEyeDirWS, -lightDirectionWS));\n float power = min(1.0, pow(glareComponent, hazeParams.hazeGlareBlend));\n\n vec4 glareColor = vec4(hazeParams.hazeGlareColor, 1.0);\n\n blendedHazeColor = mix(hazeColor, glareColor, power);\n } else {\n blendedHazeColor = hazeColor;\n }\n\n vec4 potentialFragColor;\n\n if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {\n // Compute separately for each colour\n // Haze is based on both range and altitude\n // Taken from 
www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n vec3 hazeDensityDistribution = \n hazeParams.colorModulationFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n vec3 hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n\n vec3 hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));\n } else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {\n // Haze is based only on range\n float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n } else {\n // Haze is based on both range and altitude\n // Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt\n\n // Note that the haze base reference affects only the haze density as function of altitude\n float hazeDensityDistribution = \n hazeParams.hazeRangeFactor * \n exp(-hazeParams.hazeHeightFactor * (eyeWorldHeight - hazeParams.hazeBaseReference));\n\n float hazeIntegral = hazeDensityDistribution * distance;\n\n const float slopeThreshold = 0.01;\n float deltaHeight = fragPositionWS.y - eyeWorldHeight;\n if (abs(deltaHeight) > slopeThreshold) {\n float t = hazeParams.hazeHeightFactor * deltaHeight;\n // Protect from wild values\n const float EPSILON = 0.0000001f;\n if (abs(t) > EPSILON) {\n hazeIntegral *= (1.0 - exp (-t)) / t;\n }\n }\n\n float hazeAmount = 1.0 - exp(-hazeIntegral);\n\n // Compute color after haze effect\n potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);\n }\n\n\n\n // Mix with background at far range\n const float BLEND_DISTANCE = 27000.0f;\n vec4 outFragColor;\n if (distance > BLEND_DISTANCE) {\n outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);\n } else {\n outFragColor = potentialFragColor;\n }\n\n return outFragColor;\n}\n\nvec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {\n float f = pow(1.0 - ndotd, 5.0);\n return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;\n}\n\n// declareSkyboxMap\nuniform samplerCube skyboxMap;\n\nvec4 evalSkyboxLight(vec3 direction, float lod) {\n // textureQueryLevels is not available until #430, so we require explicit lod\n // float mipmapLevel = lod * textureQueryLevels(skyboxMap);\n\n#if !defined(GL_ES)\n float filterLod = textureQueryLod(skyboxMap, direction).x;\n // Keep texture filtering LOD as limit to prevent aliasing on specular reflection\n lod = max(lod, filterLod);\n#endif\n\n return textureLod(skyboxMap, direction, lod);\n}\n\nvec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {\n vec3 specularLight;\n if (getLightHasAmbientMap(ambient))\n {\n float levels = getLightAmbientMapNumMips(ambient);\n float m = 12.0 / (1.0+11.0*surface.roughness);\n float lod = levels - m;\n lod = max(lod, 0.0);\n specularLight = evalSkyboxLight(lightDir, lod).xyz;\n }\n else\n {\n specularLight = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), lightDir).xyz;\n }\n return 
specularLight;\n}\n\n\nvoid evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambient, SurfaceData surface, \n float metallic, vec3 fresnelF0, vec3 albedo, float obscurance\n) {\n\n // Rotate surface normal and eye direction\n vec3 ambientSpaceSurfaceNormal = (ambient.transform * vec4(surface.normal, 0.0)).xyz;\n vec3 ambientSpaceSurfaceEyeDir = (ambient.transform * vec4(surface.eyeDir, 0.0)).xyz;\nvec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);\n\n diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * \n sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;\n\n // Specular highlight from ambient\n vec3 ambientSpaceLightDir = -reflect(ambientSpaceSurfaceEyeDir, ambientSpaceSurfaceNormal);\n specular = evalAmbientSpecularIrradiance(ambient, surface, ambientSpaceLightDir) * ambientFresnel;\n\nobscurance = mix(1.0, obscurance, isObscuranceEnabled());\n\n float lightEnergy = obscurance * getLightAmbientIntensity(ambient);\n\n diffuse *= mix(vec3(1), albedo, isAlbedoEnabled());\n\n lightEnergy *= isAmbientEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\nvoid evalLightingDirectional(out vec3 diffuse, out vec3 specular, vec3 lightDir, vec3 lightIrradiance,\n SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n) {\n\n // Attenuation\n vec3 lightEnergy = shadow * lightIrradiance;\n\n updateSurfaceDataWithLight(surface, -lightDir);\n\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n);\n\n lightEnergy *= isDirectionalEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n}\n\n\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 normalWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) \n{\n \n // prepareGlobalLight\n // Transform directions to worldspace\n vec3 fragNormalWS = vec3(normalWS);\n vec3 fragPositionWS = vec3(invViewMat * vec4(positionES, 1.0));\n vec3 fragEyeVectorWS = invViewMat[3].xyz - fragPositionWS;\n vec3 fragEyeDirWS = normalize(fragEyeVectorWS);\n\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n\n\n \n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragEyeDirWS);\n\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surfaceWS, metallic, fresnel, albedo, obscurance);\n color += ambientDiffuse;\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surfaceWS, metallic, fresnel, albedo, shadowAttenuation);\n color += directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n fragPositionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in 
world coordinates\n lightDirection // keylight direction vector in world coordinates\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\nvec3 evalGlobalLightingAlphaBlendedWithHaze(\n mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 positionES, vec3 positionWS, \n vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, SurfaceData surface, float opacity, vec3 prevLighting) \n{\n // Get light\n Light light = getKeyLight();\n LightAmbient lightAmbient = getLightAmbient();\n \n vec3 lightDirection = getLightDirection(light);\n vec3 lightIrradiance = getLightIrradiance(light);\n\n vec3 color = vec3(0.0);\n\n \n color = prevLighting;\n color += emissive * isEmissiveEnabled();\n\n // Ambient\n vec3 ambientDiffuse;\n vec3 ambientSpecular;\n evalLightingAmbient(ambientDiffuse, ambientSpecular, lightAmbient, surface, metallic, fresnel, albedo, obscurance);\n\n // Directional\n vec3 directionalDiffuse;\n vec3 directionalSpecular;\n evalLightingDirectional(directionalDiffuse, directionalSpecular, lightDirection, lightIrradiance, surface, metallic, fresnel, albedo, shadowAttenuation);\n\n color += ambientDiffuse + directionalDiffuse;\n color += (ambientSpecular + directionalSpecular) / opacity;\n\n // Haze\n if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {\n vec4 colorV4 = computeHazeColor(\n vec4(color, 1.0), // fragment original color\n positionES, // fragment position in eye coordinates\n positionWS, // fragment position in world coordinates\n invViewMat[3].xyz, // eye position in world coordinates\n lightDirection // keylight direction vector\n );\n\n color = colorV4.rgb;\n }\n\n return color;\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 15/01/18.\n// Copyright 2018 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Everything about light\nuniform lightBuffer {\n Light lightArray[256];\n};\nLight getLight(int index) {\n return lightArray[index];\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n \n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * shadow * getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n\n lightEnergy *= isPointEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edge\n float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = 
vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 7/5/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n\n\nvoid evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,\n vec4 fragLightDirLen, float cosSpotAngle, SurfaceData surface,\n float metallic, vec3 fresnel, vec3 albedo, float shadow\n, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature\n) {\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation \n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow *getLightIrradiance(light);\n\n // Eval shading\n evalFragShading(diffuse, specular, metallic, fresnel, surface, albedo\n,scattering, midNormalCurvature, lowNormalCurvature\n);\n \n lightEnergy *= isSpotEnabled();\n diffuse *= lightEnergy * isDiffuseEnabled();\n specular *= lightEnergy * isSpecularEnabled();\n\n if (isShowLightContour() > 0.0) {\n // Show edges\n float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);\n float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));\n float edgeDist = min(edgeDistR, edgeDistS);\n float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);\n if (edge < 1.0) {\n float edgeCoord = exp2(-8.0*edge*edge);\n diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));\n }\n }\n}\n\n\n\nstruct FrustumGrid {\n float frustumNear;\n float rangeNear;\n float rangeFar;\n float frustumFar;\n ivec3 dims;\n float spare;\n mat4 eyeToGridProj;\n mat4 worldToEyeMat;\n mat4 eyeToWorldMat;\n};\n\nlayout(std140) uniform frustumGridBuffer {\n FrustumGrid frustumGrid;\n};\n\nfloat projection_getNear(mat4 projection) {\n float planeC = projection[2][3] + projection[2][2];\n float planeD = projection[3][2];\n return planeD / planeC;\n}\nfloat projection_getFar(mat4 projection) {\n //float planeA = projection[0][3] - projection[0][2]; All Zeros\n //float planeB = projection[1][3] - projection[1][2]; All Zeros\n float planeC = projection[2][3] - projection[2][2];\n float planeD = /*projection[3][3]*/ -projection[3][2];\n return planeD / planeC;\n}\n\n// glsl / C++ compatible source as interface for FrustrumGrid\n// glsl / C++ compatible source as interface for FrustrumGrid\n#if defined(Q_OS_LINUX)\n#define float_exp2 exp2f\n#else\n#define float_exp2 exp2\n#endif\n\nfloat frustumGrid_depthRampGridToVolume(float ngrid) {\n // return ngrid;\n // return sqrt(ngrid);\n return float_exp2(ngrid) - 1.0f;\n}\nfloat frustumGrid_depthRampInverseVolumeToGrid(float nvolume) {\n // return nvolume;\n // return nvolume * nvolume;\n return log2(nvolume + 1.0f);\n}\n\nvec3 frustumGrid_gridToVolume(vec3 pos, ivec3 dims) {\n vec3 gridScale = vec3(1.0f) / vec3(dims);\n vec3 volumePos = pos * gridScale;\n volumePos.z = frustumGrid_depthRampGridToVolume(volumePos.z);\n return volumePos;\n}\n\n\nfloat frustumGrid_volumeToGridDepth(float vposZ, ivec3 dims) {\n return 
frustumGrid_depthRampInverseVolumeToGrid(vposZ) * float(dims.z);\n}\n\nvec3 frustumGrid_volumeToGrid(vec3 vpos, ivec3 dims) {\n vec3 gridPos = vec3(vpos.x, vpos.y, frustumGrid_depthRampInverseVolumeToGrid(vpos.z)) * vec3(dims);\n return gridPos;\n}\n\n\nvec4 frustumGrid_volumeToClip(vec3 vpos, float rangeNear, float rangeFar) {\n vec3 ndcPos = vec3(-1.0f + 2.0f * vpos.x, -1.0f + 2.0f * vpos.y, vpos.z);\n float depth = rangeNear * (1.0f - ndcPos.z) + rangeFar * (ndcPos.z);\n vec4 clipPos = vec4(ndcPos.x * depth, ndcPos.y * depth, 1.0f, depth);\n return clipPos;\n}\n\nvec3 frustumGrid_clipToEye(vec4 clipPos, mat4 projection) {\n return vec3(\n (clipPos.x + projection[2][0] * clipPos.w) / projection[0][0],\n (clipPos.y + projection[2][1] * clipPos.w) / projection[1][1],\n -clipPos.w\n //, (clipPos.z - projection[3][3] * clipPos.w) / projection[3][2]\n );\n}\n\nvec3 frustumGrid_volumeToEye(vec3 vpos, mat4 projection, float rangeNear, float rangeFar) {\n return frustumGrid_clipToEye(frustumGrid_volumeToClip(vpos, rangeNear, rangeFar), projection);\n}\n\nfloat frustumGrid_eyeToVolumeDepth(float eposZ, float rangeNear, float rangeFar) {\n return (-eposZ - rangeNear) / (rangeFar - rangeNear);\n}\n\nvec3 frustumGrid_eyeToVolume(vec3 epos, mat4 projection, float rangeNear, float rangeFar) {\n vec4 clipPos = vec4(epos.x * projection[0][0] + epos.z * projection[2][0],\n epos.y * projection[1][1] + epos.z * projection[2][1],\n epos.z * projection[2][2] + projection[2][3],\n -epos.z);\n vec4 ndcPos = clipPos / clipPos.w;\n\n vec3 volumePos = vec3(0.5f * (ndcPos.x + 1.0f), 0.5f * (ndcPos.y + 1.0f), (clipPos.w - rangeNear) / (rangeFar - rangeNear));\n return volumePos;\n}\n\n\n\nint frustumGrid_numClusters() {\n return frustumGrid.dims.x * frustumGrid.dims.y * (frustumGrid.dims.z + 1);\n}\n\nint frustumGrid_clusterToIndex(ivec3 pos) {\n return pos.x + (pos.y + pos.z * frustumGrid.dims.y) * frustumGrid.dims.x;\n}\nivec3 frustumGrid_indexToCluster(int index) {\n ivec3 summedDims = ivec3(frustumGrid.dims.x * frustumGrid.dims.y, frustumGrid.dims.x, 1);\n int layer = index / summedDims.x;\n int offsetInLayer = index % summedDims.x;\n ivec3 clusterPos = ivec3(offsetInLayer % summedDims.y, offsetInLayer / summedDims.y, layer);\n return clusterPos;\n}\n\nvec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {\n\n vec3 cvpos = clusterPos;\n\n\n vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);\n\n vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n return eyePos;\n}\n\nvec3 frustumGrid_clusterPosToEye(ivec3 clusterPos, vec3 offset) {\n vec3 cvpos = vec3(clusterPos) + offset;\n return frustumGrid_clusterPosToEye(cvpos);\n}\n\nint frustumGrid_eyeDepthToClusterLayer(float eyeZ) {\n if ((eyeZ > -frustumGrid.frustumNear) || (eyeZ < -frustumGrid.frustumFar)) {\n return -2;\n }\n\n if (eyeZ > -frustumGrid.rangeNear) {\n return -1;\n }\n\n float volumeZ = frustumGrid_eyeToVolumeDepth(eyeZ, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));\n\n if (gridZ >= frustumGrid.dims.z) {\n gridZ = frustumGrid.dims.z;\n }\n\n\n return gridZ;\n}\n\nivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {\n\n // make sure the frontEyePos is always in the front to eval the grid pos correctly\n vec3 frontEyePos = eyePos;\n frontEyePos.z = (eyePos.z > 0.0f ? 
-eyePos.z : eyePos.z);\n vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);\n\n\n vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);\n \n if (gridPos.z >= float(frustumGrid.dims.z)) {\n gridPos.z = float(frustumGrid.dims.z);\n }\n\n ivec3 igridPos = ivec3(floor(gridPos));\n\n if ((eyePos.z > -frustumGrid.frustumNear) || (eyePos.z < -frustumGrid.frustumFar)) {\n return ivec3(igridPos.x, igridPos.y, - 2);\n }\n\n if (eyePos.z > -frustumGrid.rangeNear) {\n return ivec3(igridPos.x, igridPos.y, -1);\n }\n\n return igridPos;\n}\n\n\nint frustumGrid_eyeToClusterDirH(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.x / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[0][0] - frustumGrid.eyeToGridProj[2][0];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.x);\n\n return int(gridPos);\n}\n\nint frustumGrid_eyeToClusterDirV(vec3 eyeDir) {\n if (eyeDir.z >= 0.0f) {\n return (eyeDir.y > 0.0f ? frustumGrid.dims.y : -1);\n }\n\n float eyeDepth = -eyeDir.z;\n float nclipDir = eyeDir.y / eyeDepth;\n float ndcDir = nclipDir * frustumGrid.eyeToGridProj[1][1] - frustumGrid.eyeToGridProj[2][1];\n float volumeDir = 0.5f * (ndcDir + 1.0f);\n float gridPos = volumeDir * float(frustumGrid.dims.y);\n\n return int(gridPos);\n}\n\nivec2 frustumGrid_eyeToClusterDir(vec3 eyeDir) {\n return ivec2(frustumGrid_eyeToClusterDirH(eyeDir), frustumGrid_eyeToClusterDirV(eyeDir));\n}\n\nvec4 frustumGrid_eyeToWorld(vec4 eyePos) {\n return frustumGrid.eyeToWorldMat * eyePos;\n}\n\nvec4 frustumGrid_worldToEye(vec4 worldPos) {\n return frustumGrid.worldToEyeMat * worldPos;\n}\n\n\n\n // End C++ compatible// end of hybrid include\n\n#define GRID_NUM_ELEMENTS 4096\n#define GRID_INDEX_TYPE ivec4\n#define GRID_FETCH_BUFFER(i) i / 4][i % 4\n\nlayout(std140) uniform clusterGridBuffer {\n GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];\n};\n\nlayout(std140) uniform clusterContentBuffer {\n GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];\n};\n\nivec3 clusterGrid_getCluster(int index) {\n int clusterDesc = _clusterGridTable[GRID_FETCH_BUFFER(index)];\n int numPointLights = 0xFF & (clusterDesc >> 16);\n int numSpotLights = 0xFF & (clusterDesc >> 24);\n int contentOffset = 0xFFFF & (clusterDesc);\n return ivec3(numPointLights, numSpotLights, contentOffset);\n}\n\nint clusterGrid_getClusterLightId(int index, int offset) {\n int elementIndex = offset + index;\n /*\n int element = _clusterGridContent[GRID_FETCH_BUFFER(elementIndex)];\n return element;\n */\n int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];\n return (((elementIndex & 0x00000001) == 1) ? 
(element >> 16) : element) & 0x0000FFFF;\n}\n\n\nbool hasLocalLights(int numLights, ivec3 clusterPos, ivec3 dims) {\n return numLights>0 \n && all(greaterThanEqual(clusterPos, ivec3(0))) \n && all(lessThan(clusterPos.xy, dims.xy))\n && clusterPos.z <= dims.z;\n}\n\nvec4 evalLocalLighting(ivec3 cluster, int numLights, vec3 fragWorldPos, SurfaceData surface,\n float fragMetallic, vec3 fragFresnel, vec3 fragAlbedo, float fragScattering, \n vec4 midNormalCurvature, vec4 lowNormalCurvature, float opacity) {\n vec4 fragColor = vec4(0.0);\n vec3 fragSpecular = vec3(0.0);\n vec3 fragDiffuse = vec3(0.0);\n\n\n int lightClusterOffset = cluster.z;\n\n // Compute the rougness into gloss2 once:\n bool withScattering = (fragScattering * isScatteringEnabled() > 0.0);\n\n int numLightTouching = 0;\n for (int i = 0; i < cluster.x; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n vec3 lightEnergy = radialAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n for (int i = cluster.x; i < numLights; i++) {\n // Need the light now\n int theLightIndex = clusterGrid_getClusterLightId(i, lightClusterOffset);\n Light light = getLight(theLightIndex);\n\n // Clip againgst the light volume and Make the Light vector going from fragment to light center in world space\n vec4 fragLightVecLen2;\n vec4 fragLightDirLen;\n float cosSpotAngle;\n\n if (!lightVolume_clipFragToLightVolumePoint(light.volume, fragWorldPos.xyz, fragLightVecLen2)) {\n continue;\n }\n\n // Allright we re in the light sphere volume\n fragLightDirLen.w = length(fragLightVecLen2.xyz);\n fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;\n if (dot(surface.normal, fragLightDirLen.xyz) < 0.0) {\n continue;\n }\n\n // Check spot\n if (!lightVolume_clipFragToLightVolumeSpotSide(light.volume, fragLightDirLen, cosSpotAngle)) {\n continue;\n }\n\n numLightTouching++;\n\n vec3 diffuse = vec3(1.0);\n vec3 specular = vec3(0.1);\n\n // Allright we re valid in the volume\n float fragLightDistance = fragLightDirLen.w;\n vec3 fragLightDir = fragLightDirLen.xyz;\n\n updateSurfaceDataWithLight(surface, fragLightDir);\n\n // Eval attenuation\n float 
radialAttenuation = lightIrradiance_evalLightAttenuation(light.irradiance, fragLightDistance);\n float angularAttenuation = lightIrradiance_evalLightSpotAttenuation(light.irradiance, cosSpotAngle);\n vec3 lightEnergy = radialAttenuation * angularAttenuation * getLightIrradiance(light);\n\n // Eval shading\n if (withScattering) {\n evalFragShadingScattering(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo,\n fragScattering, midNormalCurvature, lowNormalCurvature );\n } else {\n evalFragShadingGloss(diffuse, specular, fragMetallic, fragFresnel, surface, fragAlbedo);\n }\n\n diffuse *= lightEnergy;\n specular *= lightEnergy;\n\n fragDiffuse.rgb += diffuse;\n fragSpecular.rgb += specular;\n }\n\n fragDiffuse *= isDiffuseEnabled();\n fragSpecular *= isSpecularEnabled();\n\n fragColor.rgb += fragDiffuse;\n fragColor.rgb += fragSpecular / opacity;\n return fragColor;\n}// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 
_textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\n\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nin vec2 _texCoord0;\nin vec2 _texCoord1;\nin vec4 _positionES;\nin vec4 _positionWS;\nin vec3 _normalWS;\nin vec3 _color;\nin float _alpha;\n\nout vec4 _fragColor;\n\nvoid main(void) {\n vec3 fadeEmissive;\n FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = 
fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = getMaterialOpacity(mat) * _alpha;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n vec3 fresnel = getFresnelF0(metallic, albedo);\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragPositionES = _positionES.xyz;\n vec3 fragPositionWS = _positionWS.xyz;\n // Lighting is done in world space\n vec3 fragNormalWS = normalize(_normalWS);\n\n TransformCamera cam = getTransformCamera();\n vec3 fragToEyeWS = cam._viewInverse[3].xyz - fragPositionWS;\n vec3 fragToEyeDirWS = normalize(fragToEyeWS);\n SurfaceData surfaceWS = initSurfaceData(roughness, fragNormalWS, fragToEyeDirWS);\n\n vec4 localLighting = vec4(0.0);\n\n // From frag world pos find the cluster\n vec4 clusterEyePos = frustumGrid_worldToEye(_positionWS);\n ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);\n\n ivec3 cluster = clusterGrid_getCluster(frustumGrid_clusterToIndex(clusterPos));\n int numLights = cluster.x + cluster.y;\n ivec3 dims = frustumGrid.dims.xyz;\n\n;\n if (hasLocalLights(numLights, clusterPos, dims)) {\n localLighting = evalLocalLighting(cluster, numLights, fragPositionWS, surfaceWS,\n metallic, fresnel, albedo, 0.0,\n vec4(0), vec4(0), opacity);\n }\n\n _fragColor = vec4(evalGlobalLightingAlphaBlendedWithHaze(\n cam._viewInverse,\n 1.0,\n occlusionTex,\n fragPositionES,\n fragPositionWS,\n albedo,\n fresnel,\n metallic,\n emissive + fadeEmissive,\n surfaceWS, opacity, localLighting.rgb),\n opacity);\n}\n\n\n"
+ },
+ "npKcxyZ3Zyj+Ys2SB7hVkQ==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// simple.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 9/15/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\n// the interpolated normal\nout vec3 _normalWS;\nout vec3 _normalMS;\nout vec4 _color;\nout vec2 _texCoord0;\nout vec4 _positionMS;\nout vec4 _positionES;\n\nvoid main(void) {\n _color = color_sRGBAToLinear(inColor);\n _texCoord0 = inTexCoord0.st;\n _positionMS = inPosition;\n _normalMS = inNormal.xyz;\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = 
obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:09 2018\n//\n// simple_transparent_textured_unlit.frag\n// fragment shader\n//\n// Created by Sam Gateau on 4/3/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// the albedo texture\nuniform sampler2D originalTexture;\n\nin vec4 _color;\nin vec2 _texCoord0;\n\nlayout(location = 0) out vec4 _fragColor0;\n\nvoid main(void) {\n vec4 texel = texture(originalTexture, _texCoord0.st);\n float colorAlpha = _color.a;\n if (_color.a <= 0.0) {\n texel = color_sRGBAToLinear(texel);\n colorAlpha = -_color.a;\n }\n _fragColor0 = vec4(_color.rgb * texel.rgb, colorAlpha * texel.a);\n}\n\n"
+ },
+ "oe9F/0BImUlZtf8jGG3qng==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// model_normal_map_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 04/24/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec4 _positionWS;\nlayout(location = 2) out vec2 _texCoord0;\nlayout(location = 3) out vec2 _texCoord1;\nlayout(location = 4) out vec3 _normalWS;\nlayout(location = 5) out vec3 _tangentWS;\nlayout(location = 6) out vec3 _color;\nlayout(location = 7) out float _alpha;\n\nvoid main(void) {\n // pass along the color\n _color = color_sRGBToLinear(inColor.rgb);\n _alpha = inColor.a;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n 
eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _tangentWS = vec3(dot(mr0, inTangent.xyz), dot(mr1, inTangent.xyz), dot(mr2, inTangent.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_normal_map_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? 
((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 
0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = 
dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat 
unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define normalMap 1\nvec3 fetchNormalMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, normalMap, uv).xyz;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D normalMap;\nvec3 fetchNormalMap(vec2 uv) {\n // unpack normal, swizzle to get into hifi tangent space with Y axis pointing out\n vec2 t = 2.0 * (texture(normalMap, uv, TAA_TEXTURE_LOD_BIAS).rg - vec2(0.5, 0.5));\n vec2 t2 = t*t;\n return vec3(t.x, sqrt(1.0 - t2.x - t2.y), t.y);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\n#endif\n\n\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Olivier Prat on 04/12/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n#define CATEGORY_COUNT 5\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define VEC4 glm::vec4\n# define VEC2 glm::vec2\n# define FLOAT32 glm::float32\n# define INT32 glm::int32\n#else\n# define VEC4 vec4\n# define VEC2 vec2\n# define FLOAT32 float\n# define INT32 int\n#endif\n\nstruct FadeParameters\n{\n\tVEC4 _noiseInvSizeAndLevel;\n\tVEC4 _innerEdgeColor;\n\tVEC4 _outerEdgeColor;\n\tVEC2 _edgeWidthInvWidth;\n\tFLOAT32 _baseLevel;\n\tINT32 _isInverted;\n};\n\n // //\nlayout(std140) uniform fadeParametersBuffer {\n FadeParameters fadeParameters[CATEGORY_COUNT];\n};\nuniform sampler2D fadeMaskMap;\n\nstruct FadeObjectParams {\n int category;\n float threshold;\n vec3 noiseOffset;\n vec3 baseOffset;\n vec3 baseInvSize;\n};\n\nvec2 hash2D(vec3 position) {\n return position.xy* vec2(0.1677, 0.221765) + position.z*0.561;\n}\n\nfloat noise3D(vec3 position) {\n float n = textureLod(fadeMaskMap, hash2D(position), 0.0).r;\n return pow(n, 1.0/2.2); // Remove sRGB. 
Need to fix this later directly in the texture\n}\n\nfloat evalFadeNoiseGradient(FadeObjectParams params, vec3 position) {\n // Do tri-linear interpolation\n vec3 noisePosition = position * fadeParameters[params.category]._noiseInvSizeAndLevel.xyz + params.noiseOffset;\n vec3 noisePositionFloored = floor(noisePosition);\n vec3 noisePositionFraction = fract(noisePosition);\n\n noisePositionFraction = noisePositionFraction*noisePositionFraction*(3.0 - 2.0*noisePositionFraction);\n\n float noiseLowXLowYLowZ = noise3D(noisePositionFloored);\n float noiseLowXHighYLowZ = noise3D(noisePositionFloored+vec3(0,1,0));\n float noiseHighXLowYLowZ = noise3D(noisePositionFloored+vec3(1,0,0));\n float noiseHighXHighYLowZ = noise3D(noisePositionFloored+vec3(1,1,0));\n float noiseLowXLowYHighZ = noise3D(noisePositionFloored+vec3(0,0,1));\n float noiseLowXHighYHighZ = noise3D(noisePositionFloored+vec3(0,1,1));\n float noiseHighXLowYHighZ = noise3D(noisePositionFloored+vec3(1,0,1));\n float noiseHighXHighYHighZ = noise3D(noisePositionFloored+vec3(1,1,1));\n vec4 maskLowZ = vec4(noiseLowXLowYLowZ, noiseLowXHighYLowZ, noiseHighXLowYLowZ, noiseHighXHighYLowZ);\n vec4 maskHighZ = vec4(noiseLowXLowYHighZ, noiseLowXHighYHighZ, noiseHighXLowYHighZ, noiseHighXHighYHighZ);\n vec4 maskXY = mix(maskLowZ, maskHighZ, noisePositionFraction.z);\n vec2 maskY = mix(maskXY.xy, maskXY.zw, noisePositionFraction.x);\n\n float noise = mix(maskY.x, maskY.y, noisePositionFraction.y);\n noise -= 0.5; // Center on value 0\n return noise * fadeParameters[params.category]._noiseInvSizeAndLevel.w;\n}\n\nfloat evalFadeBaseGradient(FadeObjectParams params, vec3 position) {\n float gradient = length((position - params.baseOffset) * params.baseInvSize.xyz);\n gradient = gradient-0.5; // Center on value 0.5\n gradient *= fadeParameters[params.category]._baseLevel;\n return gradient;\n}\n\nfloat evalFadeGradient(FadeObjectParams params, vec3 position) {\n float baseGradient = evalFadeBaseGradient(params, position);\n float noiseGradient = evalFadeNoiseGradient(params, position);\n float gradient = noiseGradient+baseGradient+0.5;\n\n return gradient;\n}\n\nfloat evalFadeAlpha(FadeObjectParams params, vec3 position) {\n return evalFadeGradient(params, position)-params.threshold;\n}\n\nvoid applyFadeClip(FadeObjectParams params, vec3 position) {\n if (evalFadeAlpha(params, position) < 0.0) {\n discard;\n }\n}\n\nvoid applyFade(FadeObjectParams params, vec3 position, out vec3 emissive) {\n float alpha = evalFadeAlpha(params, position);\n if (fadeParameters[params.category]._isInverted!=0) {\n alpha = -alpha;\n }\n\n if (alpha < 0.0) {\n discard;\n }\n \n float edgeMask = alpha * fadeParameters[params.category]._edgeWidthInvWidth.y;\n float edgeAlpha = 1.0-clamp(edgeMask, 0.0, 1.0);\n\n edgeMask = step(edgeMask, 1.0);\n edgeAlpha *= edgeAlpha; // Square to have a nice ease out\n vec4 color = mix(fadeParameters[params.category]._innerEdgeColor, fadeParameters[params.category]._outerEdgeColor, edgeAlpha);\n emissive = color.rgb * edgeMask * color.a;\n}\n\n\nuniform int fadeCategory;\nuniform vec3 fadeNoiseOffset;\nuniform vec3 fadeBaseOffset;\nuniform vec3 fadeBaseInvSize;\nuniform float fadeThreshold;\n\n\n\n\nlayout(location = 0) in vec4 _positionES;\nlayout(location = 1) in vec4 _positionWS;\nlayout(location = 2) in vec2 _texCoord0;\nlayout(location = 3) in vec2 _texCoord1;\nlayout(location = 4) in vec3 _normalWS;\nlayout(location = 5) in vec3 _tangentWS;\nlayout(location = 6) in vec3 _color;\n\nvoid main(void) {\n vec3 fadeEmissive;\n 
FadeObjectParams fadeParams;\n\n fadeParams.category = fadeCategory;\n fadeParams.threshold = fadeThreshold;\n fadeParams.noiseOffset = fadeNoiseOffset;\n fadeParams.baseOffset = fadeBaseOffset;\n fadeParams.baseInvSize = fadeBaseInvSize;\n\n applyFade(fadeParams, _positionWS.xyz, fadeEmissive);\n\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nvec3 normalTex = (((matKey & NORMAL_MAP_BIT) != 0) ? fetchNormalMap(_texCoord0) : vec3(0.0, 1.0, 0.0));\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\n\n\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n vec3 fragNormalWS;\n {\n vec3 normalizedNormal = normalize(_normalWS.xyz);\n vec3 normalizedTangent = normalize(_tangentWS.xyz);\n vec3 normalizedBitangent = cross(normalizedNormal, normalizedTangent);\n // attenuate the normal map divergence from the mesh normal based on distance\n // The attenuation range [30,100] meters from the eye is arbitrary for now\n vec3 localNormal = mix(normalTex, vec3(0.0, 1.0, 0.0), smoothstep(30.0, 100.0, (-_positionES).z));\n fragNormalWS = vec3(normalizedBitangent * localNormal.x + normalizedNormal * localNormal.y + normalizedTangent * localNormal.z);\n}\n\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n\n packDeferredFragment(\n normalize(fragNormalWS.xyz),\n opacity,\n albedo,\n roughness,\n metallic,\n emissive + fadeEmissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "opM7PUkZqQ+y3QE9UPUYHQ==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n// model.vert\n// vertex shader\n//\n// Created by Andrzej Kapolka on 10/14/13.\n// Copyright 2013 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef 
GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec2 _texCoord0;\nlayout(location = 2) out vec2 _texCoord1;\nlayout(location = 3) out vec3 _normalWS;\nlayout(location = 4) out vec3 _color;\nlayout(location = 5) out float _alpha;\n\nvoid main(void) {\n _color = color_sRGBToLinear(inColor.xyz);\n _alpha = inColor.w;\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n 
\n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_specular_map.frag\n// fragment shader\n//\n// Created by Andrzej Kapolka on 5/6/14.\n// Copyright 2014 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3 normals to vec3 rgb with best efficiency\nvec3 packNormal(in vec3 n) {\n return snorm12x2_to_unorm8x3(float32x3_to_oct(n));\n}\n\nvec3 unpackNormal(in vec3 p) {\n return oct_to_float32x3(unorm8x3_to_snorm12x2(p));\n}\n\n// Unpack the metallic-mode value\nconst float FRAG_PACK_SHADED_NON_METALLIC = 0.0;\nconst float FRAG_PACK_SHADED_METALLIC = 0.1;\nconst float FRAG_PACK_SHADED_RANGE_INV = 1.0 / (FRAG_PACK_SHADED_METALLIC - FRAG_PACK_SHADED_NON_METALLIC);\n\nconst float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;\nconst float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;\nconst float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);\n\nconst float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;\nconst float FRAG_PACK_SCATTERING_METALLIC = 0.5;\nconst float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);\n\nconst float FRAG_PACK_UNLIT = 0.6;\n\nconst int FRAG_MODE_UNLIT = 0;\nconst int FRAG_MODE_SHADED = 1;\nconst int FRAG_MODE_LIGHTMAPPED = 2;\nconst int FRAG_MODE_SCATTERING = 3;\n\nvoid unpackModeMetallic(float rawValue, out int mode, out float metallic) {\n if (rawValue <= FRAG_PACK_SHADED_METALLIC) {\n mode = FRAG_MODE_SHADED;\n metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {\n mode = FRAG_MODE_LIGHTMAPPED;\n metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * 
FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);\n } else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {\n mode = FRAG_MODE_SCATTERING;\n metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);\n } else if (rawValue >= FRAG_PACK_UNLIT) {\n mode = FRAG_MODE_UNLIT;\n metallic = 0.0;\n }\n}\n\nfloat packShadedMetallic(float metallic) {\n return mix(FRAG_PACK_SHADED_NON_METALLIC, FRAG_PACK_SHADED_METALLIC, metallic);\n}\n\nfloat packLightmappedMetallic(float metallic) {\n return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);\n}\n\nfloat packScatteringMetallic(float metallic) {\n return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);\n}\n\nfloat packUnlit() {\n return FRAG_PACK_UNLIT;\n}\n\nlayout(location = 0) out vec4 _fragColor0; // albedo / metallic\nlayout(location = 1) out vec4 _fragColor1; // Normal\nlayout(location = 2) out vec4 _fragColor2; // scattering / emissive / occlusion\nlayout(location = 3) out vec4 _fragColor3; // emissive\n\n// the alpha threshold\nconst float alphaThreshold = 0.5;\nfloat evalOpaqueFinalAlpha(float alpha, float mapAlpha) {\n return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));\n}\n\nconst float DEFAULT_ROUGHNESS = 0.9;\nconst float DEFAULT_SHININESS = 10.0;\nconst float DEFAULT_METALLIC = 0.0;\nconst vec3 DEFAULT_SPECULAR = vec3(0.1);\nconst vec3 DEFAULT_EMISSIVE = vec3(0.0);\nconst float DEFAULT_OCCLUSION = 1.0;\nconst float DEFAULT_SCATTERING = 0.0;\nconst vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;\n\nstruct LightingModel {\n vec4 _UnlitEmissiveLightmapBackground;\n vec4 _ScatteringDiffuseSpecularAlbedo;\n vec4 _AmbientDirectionalPointSpot;\n vec4 _ShowContourObscuranceWireframe;\n vec4 _Haze_spareyzw;\n};\n\nuniform lightingModelBuffer{\n LightingModel lightingModel;\n};\n\nfloat isUnlitEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.x;\n}\nfloat isEmissiveEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.y;\n}\nfloat isLightmapEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.z;\n}\nfloat isBackgroundEnabled() {\n return lightingModel._UnlitEmissiveLightmapBackground.w;\n}\nfloat isObscuranceEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.y;\n}\n\nfloat isScatteringEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.x;\n}\nfloat isDiffuseEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.y;\n}\nfloat isSpecularEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.z;\n}\nfloat isAlbedoEnabled() {\n return lightingModel._ScatteringDiffuseSpecularAlbedo.w;\n}\n\nfloat isAmbientEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.x;\n}\nfloat isDirectionalEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.y;\n}\nfloat isPointEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.z;\n}\nfloat isSpotEnabled() {\n return lightingModel._AmbientDirectionalPointSpot.w;\n}\n\nfloat isShowLightContour() {\n return lightingModel._ShowContourObscuranceWireframe.x;\n}\n\nfloat isWireframeEnabled() {\n return lightingModel._ShowContourObscuranceWireframe.z;\n}\n\nfloat isHazeEnabled() {\n return lightingModel._Haze_spareyzw.x;\n}\n\n\n\nstruct SurfaceData {\n vec3 normal;\n vec3 eyeDir;\n vec3 lightDir;\n vec3 halfDir;\n float roughness;\n float roughness2;\n float roughness4;\n float ndotv;\n float ndotl;\n float ndoth;\n float ldoth;\n float smithInvG1NdotV;\n};\n\nvec3 getFresnelF0(float 
metallic, vec3 metalF0) {\n // Enable continuous metallness value by lerping between dielectric\n // and metal fresnel F0 value based on the \"metallic\" parameter\n return mix(vec3(0.03), metalF0, metallic);\n}\nfloat evalSmithInvG1(float roughness4, float ndotd) {\n return ndotd + sqrt(roughness4+ndotd*ndotd*(1.0-roughness4));\n}\n\nSurfaceData initSurfaceData(float roughness, vec3 normal, vec3 eyeDir) {\n SurfaceData surface;\n surface.eyeDir = eyeDir;\n surface.normal = normal;\n surface.roughness = mix(0.01, 1.0, roughness);\n surface.roughness2 = surface.roughness * surface.roughness;\n surface.roughness4 = surface.roughness2 * surface.roughness2;\n surface.ndotv = clamp(dot(normal, eyeDir), 0.0, 1.0);\n surface.smithInvG1NdotV = evalSmithInvG1(surface.roughness4, surface.ndotv);\n\n // These values will be set when we know the light direction, in updateSurfaceDataWithLight\n surface.ndoth = 0.0;\n surface.ndotl = 0.0;\n surface.ldoth = 0.0;\n surface.lightDir = vec3(0,0,1);\n surface.halfDir = vec3(0,0,1);\n\n return surface;\n}\n\nvoid updateSurfaceDataWithLight(inout SurfaceData surface, vec3 lightDir) {\n surface.lightDir = lightDir;\n surface.halfDir = normalize(surface.eyeDir + lightDir);\n vec3 dots;\n dots.x = dot(surface.normal, surface.halfDir);\n dots.y = dot(surface.normal, surface.lightDir);\n dots.z = dot(surface.halfDir, surface.lightDir);\n dots = clamp(dots, vec3(0), vec3(1));\n surface.ndoth = dots.x;\n surface.ndotl = dots.y;\n surface.ldoth = dots.z;\n}\n\nvec3 fresnelSchlickColor(vec3 fresnelColor, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return vec3(exponential) + fresnelColor * (1.0 - exponential);\n}\n\nfloat fresnelSchlickScalar(float fresnelScalar, SurfaceData surface) {\n float base = 1.0 - surface.ldoth;\n //float exponential = pow(base, 5.0);\n float base2 = base * base;\n float exponential = base * base2 * base2;\n return (exponential) + fresnelScalar * (1.0 - exponential);\n}\n\nfloat specularDistribution(SurfaceData surface) {\n // See https://www.khronos.org/assets/uploads/developers/library/2017-web3d/glTF-2.0-Launch_Jun17.pdf\n // for details of equations, especially page 20\n float denom = (surface.ndoth*surface.ndoth * (surface.roughness4 - 1.0) + 1.0);\n denom *= denom;\n // Add geometric factors G1(n,l) and G1(n,v)\n float smithInvG1NdotL = evalSmithInvG1(surface.roughness4, surface.ndotl);\n denom *= surface.smithInvG1NdotV * smithInvG1NdotL;\n // Don't divide by PI as this is part of the light normalization factor\n float power = surface.roughness4 / denom;\n return power;\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShading(float metallic, vec3 fresnel, SurfaceData surface) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n float diffuse = (1.0 - metallic) * angleAttenuation * (1.0 - fresnelColor.x);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n\n\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\n// Frag Shading returns the diffuse amount as W and the specular rgb as xyz\nvec4 evalPBRShadingDielectric(SurfaceData surface, float fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n float fresnelScalar = fresnelSchlickScalar(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = vec3(fresnelScalar) * power * angleAttenuation;\n float diffuse = angleAttenuation * (1.0 - fresnelScalar);\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, diffuse);\n}\n\nvec4 evalPBRShadingMetallic(SurfaceData surface, vec3 fresnel) {\n // Incident angle attenuation\n float angleAttenuation = surface.ndotl;\n\n // Specular Lighting\n vec3 fresnelColor = fresnelSchlickColor(fresnel, surface);\n float power = specularDistribution(surface);\n vec3 specular = fresnelColor * power * angleAttenuation;\n\n // We don't divided by PI, as the \"normalized\" equations state we should, because we decide, as Naty Hoffman, that\n // we wish to have a similar color as raw albedo on a perfectly diffuse surface perpendicularly lit\n // by a white light of intensity 1. 
But this is an arbitrary normalization of what light intensity \"means\".\n // (see http://blog.selfshadow.com/publications/s2013-shading-course/hoffman/s2013_pbs_physics_math_notes.pdf\n // page 23 paragraph \"Punctual light sources\")\n return vec4(specular, 0.f);\n}\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\nuniform sampler2D scatteringSpecularBeckmann;\n\nfloat fetchSpecularBeckmann(float ndoth, float roughness) {\n return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);\n}\n\nvec2 skinSpecular(SurfaceData surface, float intensity) {\n vec2 result = vec2(0.0, 1.0);\n if (surface.ndotl > 0.0) {\n float PH = fetchSpecularBeckmann(surface.ndoth, surface.roughness);\n float F = fresnelSchlickScalar(0.028, surface);\n float frSpec = max(PH * F / dot(surface.halfDir, surface.halfDir), 0.0);\n result.x = surface.ndotl * intensity * frSpec;\n result.y -= F;\n }\n\n return result;\n}\n\n// Generated on Wed May 23 14:24:08 2018\n//\n// Created by Sam Gateau on 6/8/16.\n// Copyright 2016 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nuniform sampler2D scatteringLUT;\n\nvec3 fetchBRDF(float LdotN, float curvature) {\n return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2.0 * curvature, 0.0, 1.0))).xyz;\n}\n\nvec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {\n return vec3(\n fetchBRDF(LdotNSpectrum.r, curvature).r,\n fetchBRDF(LdotNSpectrum.g, curvature).g,\n fetchBRDF(LdotNSpectrum.b, curvature).b);\n}\n\n// Subsurface Scattering parameters\nstruct ScatteringParameters {\n vec4 normalBendInfo; // R, G, B, factor\n vec4 curvatureInfo;// Offset, Scale, level\n vec4 debugFlags;\n};\n\nuniform subsurfaceScatteringParametersBuffer {\n ScatteringParameters parameters;\n};\n\nvec3 getBendFactor() {\n return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;\n}\n\nfloat getScatteringLevel() {\n return parameters.curvatureInfo.z;\n}\n\nbool showBRDF() {\n return parameters.curvatureInfo.w > 0.0;\n}\n\nbool showCurvature() {\n return parameters.debugFlags.x > 0.0;\n}\nbool showDiffusedNormal() {\n return parameters.debugFlags.y > 0.0;\n}\n\n\nfloat tuneCurvatureUnsigned(float curvature) {\n return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;\n}\n\nfloat unpackCurvature(float packedCurvature) {\n return (packedCurvature * 2.0 - 1.0);\n}\n\nvec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {\n vec3 bendFactorSpectrum = getBendFactor();\n // vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));\n vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));\n vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));\n vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));\n\n vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));\n\n return NdotLSpectrum;\n}\n\n\n\n\n\nvec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {\n if (showDiffusedNormal()) {\n return lowNormal * 0.5 + vec3(0.5);\n }\n if (showCurvature()) {\n return (curvature > 0.0 ? 
vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));\n }\n\n vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);\n\n float tunedCurvature = tuneCurvatureUnsigned(curvature);\n\n vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);\n return brdf;\n}\n\n\n\n\nvoid evalFragShading(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n if (scattering * isScatteringEnabled() > 0.0) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n diffuse = mix(vec3(surface.ndotl), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n } else {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n specular = shading.xyz;\n }\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\n\nvoid evalFragShadingScattering(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo,\n float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {\n vec3 brdf = evalSkinBRDF(surface.lightDir, surface.normal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);\n float NdotL = surface.ndotl;\n diffuse = mix(vec3(NdotL), brdf, scattering);\n\n // Specular Lighting\n vec2 specularBrdf = skinSpecular(surface, 1.0);\n \n diffuse *= specularBrdf.y;\n specular = vec3(specularBrdf.x);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n}\n\nvoid evalFragShadingGloss(out vec3 diffuse, out vec3 specular,\n float metallic, vec3 fresnel, SurfaceData surface, vec3 albedo) {\n vec4 shading = evalPBRShading(metallic, fresnel, surface);\n diffuse = vec3(shading.w);\n diffuse *= mix(vec3(1.0), albedo, isAlbedoEnabled());\n specular = shading.xyz;\n}\n\n\nvoid packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(((scattering > 0.0) ? 
vec3(scattering) : emissive), occlusion);\n \n _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);\n}\n\nvoid packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {\n if (alpha != 1.0) {\n discard;\n }\n\n _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);\n\n _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);\n}\n\nvoid packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {\n if (alpha != 1.0) {\n discard;\n }\n _fragColor0 = vec4(color, packUnlit());\n _fragColor1 = vec4(packNormal(normal), 1.0);\n // _fragColor2 = vec4(vec3(0.0), 1.0);\n _fragColor3 = vec4(color, 1.0);\n}\n\nvoid packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {\n if (alpha <= 0.0) {\n discard;\n }\n _fragColor0 = vec4(albedo.rgb, alpha);\n _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));\n\n}\n\n// The material values (at least the material key) must be precisely bitwise accurate\n// to what is provided by the uniform buffer, or the material key has the wrong bits\n\nstruct Material {\n vec4 _emissiveOpacity;\n vec4 _albedoRoughness;\n vec4 _fresnelMetallic;\n vec4 _scatteringSpare2Key;\n};\n\nuniform materialBuffer {\n Material _mat;\n};\n\nMaterial getMaterial() {\n return _mat;\n}\n\nvec3 getMaterialEmissive(Material m) { return m._emissiveOpacity.rgb; }\nfloat getMaterialOpacity(Material m) { return m._emissiveOpacity.a; }\n\nvec3 getMaterialAlbedo(Material m) { return m._albedoRoughness.rgb; }\nfloat getMaterialRoughness(Material m) { return m._albedoRoughness.a; }\n\nvec3 getMaterialFresnel(Material m) { return m._fresnelMetallic.rgb; }\n\n\nfloat getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }\n\nfloat getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }\n\nfloat getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }\n\nBITFIELD getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }\n\nconst BITFIELD EMISSIVE_VAL_BIT = 0x00000001;\nconst BITFIELD UNLIT_VAL_BIT = 0x00000002;\nconst BITFIELD ALBEDO_VAL_BIT = 0x00000004;\nconst BITFIELD METALLIC_VAL_BIT = 0x00000008;\nconst BITFIELD GLOSSY_VAL_BIT = 0x00000010;\nconst BITFIELD OPACITY_VAL_BIT = 0x00000020;\nconst BITFIELD OPACITY_MASK_MAP_BIT = 0x00000040;\nconst BITFIELD OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;\nconst BITFIELD SCATTERING_VAL_BIT = 0x00000100;\n\n\nconst BITFIELD EMISSIVE_MAP_BIT = 0x00000200;\nconst BITFIELD ALBEDO_MAP_BIT = 0x00000400;\nconst BITFIELD METALLIC_MAP_BIT = 0x00000800;\nconst BITFIELD ROUGHNESS_MAP_BIT = 0x00001000;\nconst BITFIELD NORMAL_MAP_BIT = 0x00002000;\nconst BITFIELD OCCLUSION_MAP_BIT = 0x00004000;\nconst BITFIELD LIGHTMAP_MAP_BIT = 0x00008000;\nconst BITFIELD SCATTERING_MAP_BIT = 0x00010000;\n\n#define TAA_TEXTURE_LOD_BIAS -1.0\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n#define GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES 8\n\nstruct GPUTextureTable {\n uvec4 _textures[GPU_TEXTURE_TABLE_MAX_NUM_TEXTURES];\n};\n\n#define TextureTable(index, name) layout (std140) uniform gpu_resourceTextureTable##index { GPUTextureTable name; }\n\n#define tableTex(name, slot) sampler2D(name._textures[slot].xy)\n#define tableTexMinLod(name, slot) float(name._textures[slot].z)\n\n#define tableTexValue(name, slot, uv) \\\n tableTexValueLod(tableTex(matTex, 
albedoMap), tableTexMinLod(matTex, albedoMap), uv)\n \nvec4 tableTexValueLod(sampler2D sampler, float minLod, vec2 uv) {\n float queryLod = textureQueryLod(sampler, uv).x;\n queryLod = max(minLod, queryLod);\n return textureLod(sampler, uv, queryLod);\n}\n \n#else\n\n#endif\n\n#ifdef GPU_TEXTURE_TABLE_BINDLESS\n\nTextureTable(0, matTex);\n#define albedoMap 0\nvec4 fetchAlbedoMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, albedoMap, uv);\n}\n#define roughnessMap 4\nfloat fetchRoughnessMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, roughnessMap, uv).r;\n}\n#define metallicMap 2\nfloat fetchMetallicMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, metallicMap, uv).r;\n}\n#define emissiveMap 3\nvec3 fetchEmissiveMap(vec2 uv) {\n // Should take into account TAA_TEXTURE_LOD_BIAS?\n return tableTexValue(matTex, emissiveMap, uv).rgb;\n}\n#define occlusionMap 5\nfloat fetchOcclusionMap(vec2 uv) {\n return tableTexValue(matTex, occlusionMap, uv).r;\n}\n#define scatteringMap 6\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(tableTex(matTex, scatteringMap), uv).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return tableTexValue(matTex, scatteringMap, uv).r; // boolean scattering for now\n}\n#else\n\nuniform sampler2D albedoMap;\nvec4 fetchAlbedoMap(vec2 uv) {\n return texture(albedoMap, uv, TAA_TEXTURE_LOD_BIAS);\n}\nuniform sampler2D roughnessMap;\nfloat fetchRoughnessMap(vec2 uv) {\n return (texture(roughnessMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D metallicMap;\nfloat fetchMetallicMap(vec2 uv) {\n return (texture(metallicMap, uv, TAA_TEXTURE_LOD_BIAS).r);\n}\nuniform sampler2D emissiveMap;\nvec3 fetchEmissiveMap(vec2 uv) {\n return texture(emissiveMap, uv, TAA_TEXTURE_LOD_BIAS).rgb;\n}\nuniform sampler2D occlusionMap;\nfloat fetchOcclusionMap(vec2 uv) {\n return texture(occlusionMap, uv).r;\n}\nuniform sampler2D scatteringMap;\nfloat fetchScatteringMap(vec2 uv) {\n float scattering = texture(scatteringMap, uv, TAA_TEXTURE_LOD_BIAS).r; // boolean scattering for now\n return max(((scattering - 0.1) / 0.9), 0.0);\n return texture(scatteringMap, uv).r; // boolean scattering for now\n}\n#endif\n\n\n\nlayout(location = 1) in vec2 _texCoord0;\nlayout(location = 2) in vec2 _texCoord1;\nlayout(location = 3) in vec3 _normalWS;\nlayout(location = 4) in vec3 _color;\n\nvoid main(void) {\n Material mat = getMaterial();\n BITFIELD matKey = getMaterialKey(mat);\n vec4 albedoTex = (((matKey & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(_texCoord0) : vec4(1.0));\nfloat roughnessTex = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(_texCoord0) : 1.0);\nfloat metallicTex = (((matKey & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(_texCoord0) : 0.0);\nvec3 emissiveTex = (((matKey & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(_texCoord0) : vec3(0.0));\nfloat scatteringTex = (((matKey & SCATTERING_MAP_BIT) != 0) ? fetchScatteringMap(_texCoord0) : 0.0);\n\n float occlusionTex = (((matKey & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(_texCoord1) : 1.0);\n\n\n float opacity = 1.0;\n {\n const float OPACITY_MASK_THRESHOLD = 0.5;\n opacity = (((matKey & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?\n (((matKey & OPACITY_MASK_MAP_BIT) != 0) ? 
step(OPACITY_MASK_THRESHOLD, albedoTex.a) : albedoTex.a) :\n 1.0) * opacity;\n}\n;\n {\n if (opacity < 1.0) {\n discard;\n }\n}\n;\n\n vec3 albedo = getMaterialAlbedo(mat);\n {\n albedo.xyz = (((matKey & ALBEDO_VAL_BIT) != 0) ? albedo : vec3(1.0));\n\n if (((matKey & ALBEDO_MAP_BIT) != 0)) {\n albedo.xyz *= albedoTex.xyz;\n }\n}\n;\n albedo *= _color;\n\n float roughness = getMaterialRoughness(mat);\n {\n roughness = (((matKey & ROUGHNESS_MAP_BIT) != 0) ? roughnessTex : roughness);\n}\n;\n\n vec3 emissive = getMaterialEmissive(mat);\n {\n emissive = (((matKey & EMISSIVE_MAP_BIT) != 0) ? emissiveTex : emissive);\n}\n;\n\n float metallic = getMaterialMetallic(mat);\n {\n metallic = (((matKey & METALLIC_MAP_BIT) != 0) ? metallicTex : metallic);\n}\n;\n\n float scattering = getMaterialScattering(mat);\n {\n scattering = (((matKey & SCATTERING_MAP_BIT) != 0) ? scatteringTex : scattering);\n}\n;\n\n packDeferredFragment(\n normalize(_normalWS), \n opacity,\n albedo,\n roughness,\n metallic,\n emissive,\n occlusionTex,\n scattering);\n}\n\n\n"
+ },
+ "pGKo9pqA3ckrezIXHRSQvw==": {
+ "source": "// VERSION 1//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:23:33 2018\n//\n// DrawUnitQuadTexcoord.vert\n//\n// Draw the unit quad [-1,-1 -> 1,1] amd pass along the unit texcoords [0, 0 -> 1, 1]. Not transform used.\n// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed\n//\n// Created by Sam Gateau on 6/22/2015\n// Copyright 2015 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\nlayout(location = 0) out vec2 varTexCoord0;\n\nvoid main(void) {\n const float depth = 1.0;\n const vec4 UNIT_QUAD[4] = vec4[4](\n vec4(-1.0, -1.0, depth, 1.0),\n vec4(1.0, -1.0, depth, 1.0),\n vec4(-1.0, 1.0, depth, 1.0),\n vec4(1.0, 1.0, depth, 1.0)\n );\n vec4 pos = UNIT_QUAD[gl_VertexID];\n\n varTexCoord0 = (pos.xy + 1.0) * 0.5;\n\n gl_Position = pos;\n}\n\n\n//-------- pixel\n\n\n// OpenGLDisplayPlugin_present.frag\n\nuniform sampler2D colorMap;\n\nin vec2 varTexCoord0;\n\nout vec4 outFragColor;\n\nfloat sRGBFloatToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n\n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 colorToLinearRGB(vec3 srgb) {\n return vec3(sRGBFloatToLinear(srgb.r), sRGBFloatToLinear(srgb.g), sRGBFloatToLinear(srgb.b));\n}\n\nvoid main(void) {\n outFragColor.a = 1.0;\n outFragColor.rgb = colorToLinearRGB(texture(colorMap, varTexCoord0).rgb);\n}\n\n"
+ },
+ "pUyFVWTgBDMZyiFHN0OcYQ==": {
+ "source": "// VERSION 0//-------- vertex\n\n//PC 410 core\n// Generated on Wed May 23 14:24:07 2018\n//\n// model_lightmap_normal_map_fade.vert\n// vertex shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nlayout(location = 0) in vec4 inPosition;\nlayout(location = 1) in vec4 inNormal;\nlayout(location = 2) in vec4 inColor;\nlayout(location = 3) in vec4 inTexCoord0;\nlayout(location = 4) in vec4 inTangent;\nlayout(location = 5) in ivec4 inSkinClusterIndex;\nlayout(location = 6) in vec4 inSkinClusterWeight;\nlayout(location = 7) in vec4 inTexCoord1;\nlayout(location = 8) in vec4 inTexCoord2;\nlayout(location = 9) in vec4 inTexCoord3;\nlayout(location = 10) in vec4 inTexCoord4;\n// Linear ====> linear RGB\n// sRGB ======> standard RGB with gamma of 2.2\n// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)\n// https://software.intel.com/en-us/node/503873\n\nfloat color_scalar_sRGBToLinear(float value) {\n const float SRGB_ELBOW = 0.04045;\n \n return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);\n}\n\nvec3 color_sRGBToLinear(vec3 srgb) {\n return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));\n}\n\nvec4 color_sRGBAToLinear(vec4 srgba) {\n return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);\n}\n\nvec3 color_LinearToYCoCg(vec3 rgb) {\n\t// Y = R/4 + G/2 + B/4\n\t// Co = R/2 - B/2\n\t// Cg = -R/4 + G/2 - B/4\n\treturn vec3(\n\t\t\trgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,\n\t\t\trgb.x/2.0 - rgb.z/2.0,\n\t\t-rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0\n\t);\n}\n\nvec3 color_YCoCgToUnclampedLinear(vec3 ycocg) {\n\t// R = Y + Co - Cg\n\t// G = Y + Cg\n\t// B = Y - Co - Cg\n\treturn vec3(\n\t\tycocg.x + ycocg.y - ycocg.z,\n\t\tycocg.x + ycocg.z,\n\t\tycocg.x - ycocg.y - ycocg.z\n\t);\n}\n\nvec3 color_YCoCgToLinear(vec3 ycocg) {\n\treturn clamp(color_YCoCgToUnclampedLinear(ycocg), vec3(0.0), vec3(1.0));\n}\n\n// glsl / C++ compatible source as interface for FadeEffect\n#ifdef __cplusplus\n# define _MAT4 Mat4\n# define _VEC4 Vec4\n#\tdefine _MUTABLE mutable\n#else\n# define _MAT4 mat4\n# define _VEC4 vec4\n#\tdefine _MUTABLE \n#endif\n\nstruct _TransformCamera {\n _MUTABLE _MAT4 _view;\n _MUTABLE _MAT4 _viewInverse;\n _MUTABLE _MAT4 _projectionViewUntranslated;\n _MAT4 _projection;\n _MUTABLE _MAT4 _projectionInverse;\n _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.\n _MUTABLE _VEC4 _stereoInfo;\n};\n\n // //\n\n#define TransformCamera _TransformCamera\n\nlayout(std140) uniform transformCameraBuffer {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n TransformCamera _camera[2];\n#else\n TransformCamera _camera;\n#endif\n#else\n TransformCamera _camera;\n#endif\n};\n\n#ifdef GPU_VERTEX_SHADER\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n#ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\nlayout(location=14) in int _inStereoSide;\n#endif\n\nlayout(location=14) flat out int _stereoSide;\n\n// In stereo drawcall mode Instances are drawn twice (left then right) hence the true InstanceID is the gl_InstanceID / 2\nint gpu_InstanceID() {\n return gl_InstanceID >> 1;\n}\n\n#else\n\nint gpu_InstanceID() {\n return gl_InstanceID;\n}\n#endif\n#else\n\nint gpu_InstanceID() {\n return 
gl_InstanceID;\n}\n\n#endif\n\n#endif\n\n#ifdef GPU_PIXEL_SHADER\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\nlayout(location=14) flat in int _stereoSide;\n#endif\n#endif\n\n\nTransformCamera getTransformCamera() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n #ifdef GPU_TRANSFORM_STEREO_CAMERA\n #ifdef GPU_VERTEX_SHADER\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_ATTRIBUTED\n _stereoSide = _inStereoSide;\n #endif\n #ifdef GPU_TRANSFORM_STEREO_CAMERA_INSTANCED\n _stereoSide = gl_InstanceID % 2;\n #endif\n #endif\n return _camera[_stereoSide];\n #else\n return _camera;\n #endif\n#else\n return _camera;\n#endif\n}\n\nvec3 getEyeWorldPos() {\n return getTransformCamera()._viewInverse[3].xyz;\n}\n\nbool cam_isStereo() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n return getTransformCamera()._stereoInfo.x > 0.0;\n#else\n return _camera._stereoInfo.x > 0.0;\n#endif\n}\n\nfloat cam_getStereoSide() {\n#ifdef GPU_TRANSFORM_IS_STEREO\n#ifdef GPU_TRANSFORM_STEREO_CAMERA\n return getTransformCamera()._stereoInfo.y;\n#else\n return _camera._stereoInfo.y;\n#endif\n#else\n return _camera._stereoInfo.y;\n#endif\n}\n\n\nstruct TransformObject {\n mat4 _model;\n mat4 _modelInverse;\n};\n\nlayout(location=15) in ivec2 _drawCallInfo;\n\n#if defined(GPU_SSBO_TRANSFORM_OBJECT)\nlayout(std140) buffer transformObjectBuffer {\n TransformObject _object[];\n};\nTransformObject getTransformObject() {\n TransformObject transformObject = _object[_drawCallInfo.x];\n return transformObject;\n}\n#else\nuniform samplerBuffer transformObjectBuffer;\n\nTransformObject getTransformObject() {\n int offset = 8 * _drawCallInfo.x;\n TransformObject object;\n object._model[0] = texelFetch(transformObjectBuffer, offset);\n object._model[1] = texelFetch(transformObjectBuffer, offset + 1);\n object._model[2] = texelFetch(transformObjectBuffer, offset + 2);\n object._model[3] = texelFetch(transformObjectBuffer, offset + 3);\n\n object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);\n object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);\n object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);\n object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);\n\n return object;\n}\n#endif\n\n\n\n\nconst int MAX_TEXCOORDS = 2;\n\nstruct TexMapArray { \n// mat4 _texcoordTransforms[MAX_TEXCOORDS];\n mat4 _texcoordTransforms0;\n mat4 _texcoordTransforms1;\n vec4 _lightmapParams;\n};\n\nuniform texMapArrayBuffer {\n TexMapArray _texMapArray;\n};\n\nTexMapArray getTexMapArray() {\n return _texMapArray;\n}\n\n\n\nlayout(location = 0) out vec4 _positionES;\nlayout(location = 1) out vec2 _texCoord0;\nlayout(location = 2) out vec2 _texCoord1;\nlayout(location = 3) out vec3 _normalWS;\nlayout(location = 4) out vec3 _tangentWS;\nlayout(location = 5) out vec3 _color;\nlayout(location = 6) out vec4 _positionWS;\n\nvoid main(void) {\n // pass along the color in linear space\n _color = color_sRGBToLinear(inColor.xyz);\n\n TexMapArray texMapArray = getTexMapArray();\n {\n _texCoord0 = (texMapArray._texcoordTransforms0 * vec4(inTexCoord0.st, 0.0, 1.0)).st;\n}\n\n {\n _texCoord1 = (texMapArray._texcoordTransforms1 * vec4(inTexCoord1.st, 0.0, 1.0)).st;\n}\n\n\n // standard transform\n TransformCamera cam = getTransformCamera();\n TransformObject obj = getTransformObject();\n { // transformModelToEyeAndClipPos\n vec4 eyeWAPos;\n { // _transformModelToEyeWorldAlignedPos\n highp mat4 _mv = obj._model;\n _mv[3].xyz -= cam._viewInverse[3].xyz;\n highp vec4 _eyeWApos = (_mv * inPosition);\n eyeWAPos = _eyeWApos;\n }\n\n gl_Position = 
cam._projectionViewUntranslated * eyeWAPos;\n _positionES = vec4((cam._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);\n \n {\n#ifdef GPU_TRANSFORM_IS_STEREO\n\n#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN\n vec4 eyeClipEdge[2]= vec4[2](vec4(-1,0,0,1), vec4(1,0,0,1));\n vec2 eyeOffsetScale = vec2(-0.5, +0.5);\n uint eyeIndex = uint(_stereoSide);\n gl_ClipDistance[0] = dot(gl_Position, eyeClipEdge[eyeIndex]);\n float newClipPosX = gl_Position.x * 0.5 + eyeOffsetScale[eyeIndex] * gl_Position.w;\n gl_Position.x = newClipPosX;\n#endif\n\n#else\n#endif\n }\n\n }\n\n { // transformModelToWorldPos\n _positionWS = (obj._model * inPosition);\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _normalWS = vec3(dot(mr0, inNormal.xyz), dot(mr1, inNormal.xyz), dot(mr2, inNormal.xyz));\n }\n\n { // transformModelToEyeDir\t\t\n vec3 mr0 = obj._modelInverse[0].xyz;\n vec3 mr1 = obj._modelInverse[1].xyz;\n vec3 mr2 = obj._modelInverse[2].xyz;\n _tangentWS = vec3(dot(mr0, inTangent.xyz), dot(mr1, inTangent.xyz), dot(mr2, inTangent.xyz));\n }\n\n}\n\n\n//-------- pixel\n\n//PC 410 core\n// Generated on Wed May 23 14:24:08 2018\n//\n// model_lightmap_normal_map_fade.frag\n// fragment shader\n//\n// Created by Olivier Prat on 06/05/17.\n// Copyright 2017 High Fidelity, Inc.\n//\n// Distributed under the Apache License, Version 2.0.\n// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html\n//\n\nvec2 signNotZero(vec2 v) {\n return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);\n}\n\nvec2 float32x3_to_oct(in vec3 v) {\n vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);\n}\n\n\nvec3 oct_to_float32x3(in vec2 e) {\n vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n if (v.z < 0.0) {\n v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n }\n return normalize(v);\n}\n\nvec3 snorm12x2_to_unorm8x3(vec2 f) {\n vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));\n float t = floor(u.y / 256.0);\n\n return floor(vec3(\n u.x / 16.0,\n fract(u.x / 16.0) * 256.0 + t,\n u.y - t * 256.0\n )) / 255.0;\n}\n\nvec2 unorm8x3_to_snorm12x2(vec3 u) {\n u *= 255.0;\n u.y *= (1.0 / 16.0);\n vec2 s = vec2( u.x * 16.0 + floor(u.y),\n fract(u.y) * (16.0 * 256.0) + u.z);\n return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));\n}\n\n\n// Recommended function to pack/unpack vec3