GPU Frame serialization

Brad Davis 2018-10-14 21:24:20 -07:00
parent 6fb869126e
commit e18e3fc138
58 changed files with 3528 additions and 98 deletions

View file

@ -0,0 +1,5 @@
set(TARGET_NAME framePlayer)
setup_hifi_library(AndroidExtras)
link_hifi_libraries(shared ktx shaders qml gpu gl ${PLATFORM_GL_BACKEND})
target_link_libraries(${TARGET_NAME} android log m)
target_opengl()

View file

@ -0,0 +1,50 @@
apply plugin: 'com.android.application'
android {
signingConfigs {
release {
keyAlias 'key0'
keyPassword 'password'
storeFile file('C:/android/keystore.jks')
storePassword 'password'
}
}
compileSdkVersion 28
defaultConfig {
applicationId "io.highfidelity.frameplayer"
minSdkVersion 25
targetSdkVersion 28
ndk { abiFilters 'arm64-v8a' }
externalNativeBuild {
cmake {
arguments '-DHIFI_ANDROID=1',
'-DHIFI_ANDROID_APP=framePlayer',
'-DANDROID_TOOLCHAIN=clang',
'-DANDROID_STL=c++_shared',
'-DCMAKE_VERBOSE_MAKEFILE=ON'
targets = ['framePlayer']
}
}
}
compileOptions {
sourceCompatibility JavaVersion.VERSION_1_8
targetCompatibility JavaVersion.VERSION_1_8
}
buildTypes {
release {
minifyEnabled false
proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
signingConfig signingConfigs.release
}
}
externalNativeBuild.cmake.path '../../../CMakeLists.txt'
}
dependencies {
implementation fileTree(include: ['*.jar'], dir: '../../libraries/qt/libs')
//implementation project(':oculus')
implementation project(':qt')
}

View file

@ -0,0 +1,25 @@
# Add project specific ProGuard rules here.
# By default, the flags in this file are appended to flags specified
# in C:\Android\SDK/tools/proguard/proguard-android.txt
# You can edit the include path and order by changing the proguardFiles
# directive in build.gradle.
#
# For more details, see
# http://developer.android.com/guide/developing/tools/proguard.html
# Add any project specific keep options here:
# If your project uses WebView with JS, uncomment the following
# and specify the fully qualified class name to the JavaScript interface
# class:
#-keepclassmembers class fqcn.of.javascript.interface.for.webview {
# public *;
#}
# Uncomment this to preserve the line number information for
# debugging stack traces.
#-keepattributes SourceFile,LineNumberTable
# If you keep the line number information, uncomment this to
# hide the original source file name.
#-renamesourcefileattribute SourceFile

View file

@ -0,0 +1,38 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:tools="http://schemas.android.com/tools"
package="io.highfidelity.frameplayer"
android:versionCode="1"
android:versionName="1.0"
android:installLocation="auto">
<uses-feature android:glEsVersion="0x00030002" android:required="true" />
<uses-permission android:name="android.permission.INTERNET" />
<uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE"/>
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
<uses-permission android:name="android.permission.ACCESS_NETWORK_STATE" />
<application android:label="Frame Viewer"
android:allowBackup="false"
android:name="org.qtproject.qt5.android.bindings.QtApplication"
tools:ignore="GoogleAppIndexingWarning,MissingApplicationIcon">
<meta-data android:name="com.samsung.android.vr.application.mode" android:value="vr_only"/>
<activity
android:name="org.qtproject.qt5.android.bindings.QtActivity"
android:theme="@android:style/Theme.Black.NoTitleBar.Fullscreen"
android:launchMode="singleTask"
android:screenOrientation="landscape"
android:excludeFromRecents="false"
android:configChanges="screenSize|screenLayout|orientation|keyboardHidden|keyboard|navigation|uiMode">
<!-- JNI nonsense -->
<meta-data android:name="android.app.lib_name" android:value="framePlayer"/>
<!-- Qt nonsense -->
<meta-data android:name="android.app.qt_libs_resource_id" android:resource="@array/qt_libs"/>
<meta-data android:name="android.app.bundled_in_lib_resource_id" android:resource="@array/bundled_in_lib"/>
<meta-data android:name="android.app.bundled_in_assets_resource_id" android:resource="@array/bundled_in_assets"/>
<meta-data android:name="android.app.load_local_libs" android:value="plugins/platforms/android/libqtforandroid.so:plugins/bearer/libqandroidbearer.so"/>
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
</application>
</manifest>

View file

@ -0,0 +1,6 @@
<!DOCTYPE RCC>
<RCC version="1.0">
<qresource prefix="/">
<file>qml/main.qml</file>
</qresource>
</RCC>

View file

@ -0,0 +1,91 @@
//
// Created by Bradley Austin Davis on 2018/10/21
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "PlayerWindow.h"
#include <QtCore/QFileInfo>
#include <QtGui/QImageReader>
#include <QtQml/QQmlContext>
#include <QtQuick/QQuickItem>
#include <gpu/Frame.h>
#include <gpu/FrameIO.h>
PlayerWindow::PlayerWindow() {
setFlags(Qt::MSWindowsOwnDC | Qt::Window | Qt::Dialog | Qt::WindowMinMaxButtonsHint | Qt::WindowTitleHint);
setSurfaceType(QSurface::OpenGLSurface);
create();
showFullScreen();
// Make sure the window has been created by processing events
QCoreApplication::processEvents();
// Start the rendering thread
_renderThread.initialize(this, &_surface);
// Start the UI
_surface.resize(size());
connect(&_surface, &hifi::qml::OffscreenSurface::rootContextCreated, this, [](QQmlContext* context){
context->setContextProperty("FRAMES_FOLDER", "file:assets:/frames");
});
_surface.load("qrc:///qml/main.qml");
// Connect the UI handler
QObject::connect(_surface.getRootItem(), SIGNAL(loadFile(QString)),
this, SLOT(loadFile(QString))
);
// Turn on UI input events
installEventFilter(&_surface);
}
PlayerWindow::~PlayerWindow() {
}
// static const char* FRAME_FILE = "assets:/frames/20190110_1635.json";
static void textureLoader(const std::string& filename, const gpu::TexturePointer& texture, uint16_t layer) {
QImage image;
QImageReader(filename.c_str()).read(&image);
if (layer > 0) {
return;
}
texture->assignStoredMip(0, image.byteCount(), image.constBits());
}
void PlayerWindow::loadFile(QString filename) {
QString realFilename = QUrl(filename).toLocalFile();
if (QFileInfo(realFilename).exists()) {
auto frame = gpu::readFrame(realFilename.toStdString(), _renderThread._externalTexture, &textureLoader);
_surface.pause();
_renderThread.submitFrame(frame);
}
}
void PlayerWindow::touchEvent(QTouchEvent* event) {
// Super basic input handling when the 3D scene is active: tap with two fingers to return to the
// QML UI
static size_t touches = 0;
switch (event->type()) {
case QEvent::TouchBegin:
case QEvent::TouchUpdate:
touches = std::max<size_t>(touches, event->touchPoints().size());
break;
case QEvent::TouchEnd:
if (touches >= 2) {
_renderThread.submitFrame(nullptr);
_surface.resume();
}
touches = 0;
break;
default:
break;
}
}

View file

@ -0,0 +1,35 @@
//
// Created by Bradley Austin Davis on 2018/10/21
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#pragma once
#include <QtGui/QWindow>
#include <QtCore/QSettings>
#include <qml/OffscreenSurface.h>
#include <gpu/Forward.h>
#include "RenderThread.h"
// A simple window that plays back serialized GPU frames selected from the QML UI
class PlayerWindow : public QWindow {
Q_OBJECT
public:
PlayerWindow();
virtual ~PlayerWindow();
protected:
void touchEvent(QTouchEvent *ev) override;
public slots:
void loadFile(QString filename);
private:
hifi::qml::OffscreenSurface _surface;
QSettings _settings;
RenderThread _renderThread;
};

View file

@ -0,0 +1,162 @@
//
// Created by Bradley Austin Davis on 2018/10/21
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "RenderThread.h"
#include <QtGui/QWindow>
void RenderThread::submitFrame(const gpu::FramePointer& frame) {
std::unique_lock<std::mutex> lock(_frameLock);
_pendingFrames.push(frame);
}
void RenderThread::move(const glm::vec3& v) {
std::unique_lock<std::mutex> lock(_frameLock);
_correction = glm::inverse(glm::translate(mat4(), v)) * _correction;
}
void RenderThread::setup() {
// Wait until the context has been moved to this thread
{ std::unique_lock<std::mutex> lock(_frameLock); }
makeCurrent();
// Disable vsync for profiling
::gl::setSwapInterval(0);
glClearColor(1, 1, 0, 1);
glClear(GL_COLOR_BUFFER_BIT);
_glContext.swapBuffers();
// GPU library init
gpu::Context::init<gpu::gl::GLBackend>();
_gpuContext = std::make_shared<gpu::Context>();
_backend = _gpuContext->getBackend();
_gpuContext->beginFrame();
_gpuContext->endFrame();
makeCurrent();
glGenFramebuffers(1, &_uiFbo);
glGenTextures(1, &_externalTexture);
glBindTexture(GL_TEXTURE_2D, _externalTexture);
static const glm::u8vec4 color{ 0 };
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, &color);
glClearColor(0, 1, 1, 1);
glClear(GL_COLOR_BUFFER_BIT);
_glContext.swapBuffers();
}
void RenderThread::initialize(QWindow* window, hifi::qml::OffscreenSurface* offscreen) {
std::unique_lock<std::mutex> lock(_frameLock);
setObjectName("RenderThread");
Parent::initialize();
_offscreen = offscreen;
_window = window;
_glContext.setWindow(_window);
_glContext.create();
_glContext.makeCurrent();
hifi::qml::OffscreenSurface::setSharedContext(_glContext.qglContext());
glClearColor(1, 0, 0, 1);
glClear(GL_COLOR_BUFFER_BIT);
_glContext.swapBuffers();
_glContext.doneCurrent();
_glContext.moveToThread(_thread);
_thread->setObjectName("RenderThread");
}
void RenderThread::shutdown() {
_activeFrame.reset();
while (!_pendingFrames.empty()) {
_gpuContext->consumeFrameUpdates(_pendingFrames.front());
_pendingFrames.pop();
}
_gpuContext->shutdown();
_gpuContext.reset();
}
void RenderThread::renderFrame() {
auto windowSize = _window->geometry().size();
uvec2 readFboSize;
uint32_t readFbo{ 0 };
if (_activeFrame) {
const auto &frame = _activeFrame;
_backend->recycle();
_backend->syncCache();
_gpuContext->enableStereo(frame->stereoState._enable);
if (frame && !frame->batches.empty()) {
_gpuContext->executeFrame(frame);
}
auto &glBackend = static_cast<gpu::gl::GLBackend&>(*_backend);
readFbo = glBackend.getFramebufferID(frame->framebuffer);
readFboSize = frame->framebuffer->getSize();
CHECK_GL_ERROR();
} else {
hifi::qml::OffscreenSurface::TextureAndFence newTextureAndFence;
if (_offscreen->fetchTexture(newTextureAndFence)) {
if (_uiTexture != 0) {
auto readFence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
glFlush();
_offscreen->getDiscardLambda()(_uiTexture, readFence);
_uiTexture = 0;
}
glWaitSync((GLsync)newTextureAndFence.second, 0, GL_TIMEOUT_IGNORED);
glDeleteSync((GLsync)newTextureAndFence.second);
_uiTexture = newTextureAndFence.first;
glBindFramebuffer(GL_READ_FRAMEBUFFER, _uiFbo);
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, _uiTexture, 0);
}
if (_uiTexture != 0) {
readFbo = _uiFbo;
readFboSize = { windowSize.width(), windowSize.height() };
}
}
if (readFbo) {
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBindFramebuffer(GL_READ_FRAMEBUFFER, readFbo);
glBlitFramebuffer(
0, 0, readFboSize.x, readFboSize.y,
0, 0, windowSize.width(), windowSize.height(),
GL_COLOR_BUFFER_BIT, GL_NEAREST);
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
} else {
glClearColor(1, 0, 0, 1);
glClear(GL_COLOR_BUFFER_BIT);
}
_glContext.swapBuffers();
}
void RenderThread::updateFrame() {
std::queue<gpu::FramePointer> pendingFrames;
{
std::unique_lock<std::mutex> lock(_frameLock);
pendingFrames.swap(_pendingFrames);
}
while (!pendingFrames.empty()) {
_activeFrame = pendingFrames.front();
pendingFrames.pop();
if (_activeFrame) {
_gpuContext->consumeFrameUpdates(_activeFrame);
}
}
}
bool RenderThread::process() {
updateFrame();
makeCurrent();
renderFrame();
return true;
}

View file

@ -0,0 +1,54 @@
//
// Created by Bradley Austin Davis on 2018/10/21
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#pragma once
#include <GenericThread.h>
#include <gl/Context.h>
#include <gpu/gl/GLBackend.h>
#include <qml/OffscreenSurface.h>
class RenderThread : public GenericThread {
using Parent = GenericThread;
public:
QWindow* _window{ nullptr };
std::mutex _mutex;
gpu::ContextPointer _gpuContext; // initialized during window creation
std::shared_ptr<gpu::Backend> _backend;
std::atomic<size_t> _presentCount{ 0 };
std::mutex _frameLock;
std::queue<gpu::FramePointer> _pendingFrames;
gpu::FramePointer _activeFrame;
uint32_t _externalTexture{ 0 };
glm::mat4 _correction;
hifi::qml::OffscreenSurface* _offscreen{ nullptr };
gl::Context _glContext;
uint32_t _uiTexture{ 0 };
uint32_t _uiFbo{ 0 };
void move(const glm::vec3& v);
void setup() override;
bool process() override;
void shutdown() override;
void initialize(QWindow* window, hifi::qml::OffscreenSurface* offscreen);
void submitFrame(const gpu::FramePointer& frame);
void updateFrame();
void renderFrame();
bool makeCurrent() {
return _glContext.makeCurrent();
}
void doneCurrent() {
_glContext.doneCurrent();
}
};

View file

@ -0,0 +1,54 @@
//
// Created by Bradley Austin Davis on 2018/11/22
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <android/log.h>
#include <QtGui/QGuiApplication>
#include <QtCore/QTimer>
#include <QtCore/QFileInfo>
#include <Trace.h>
#include "PlayerWindow.h"
void messageHandler(QtMsgType type, const QMessageLogContext& context, const QString& message) {
if (!message.isEmpty()) {
// Keep the converted string alive for the duration of the log call; calling
// c_str() on the temporary returned by toStdString() would leave a dangling pointer.
const std::string local = message.toStdString();
switch (type) {
case QtDebugMsg:
__android_log_write(ANDROID_LOG_DEBUG, "Interface", local.c_str());
break;
case QtInfoMsg:
__android_log_write(ANDROID_LOG_INFO, "Interface", local.c_str());
break;
case QtWarningMsg:
__android_log_write(ANDROID_LOG_WARN, "Interface", local.c_str());
break;
case QtCriticalMsg:
__android_log_write(ANDROID_LOG_ERROR, "Interface", local.c_str());
break;
case QtFatalMsg:
default:
__android_log_write(ANDROID_LOG_FATAL, "Interface", local.c_str());
abort();
}
}
}
int main(int argc, char** argv) {
setupHifiApplication("gpuFramePlayer");
QGuiApplication app(argc, argv);
auto oldMessageHandler = qInstallMessageHandler(messageHandler);
DependencyManager::set<tracing::Tracer>();
PlayerWindow window;
app.exec();
qInstallMessageHandler(oldMessageHandler);
return 0;
}

View file

@ -0,0 +1,36 @@
import QtQuick 2.2
import QtQuick.Dialogs 1.1
import Qt.labs.folderlistmodel 2.11
Item {
id: root
width: 640
height: 480
ListView {
anchors.fill: parent
FolderListModel {
id: folderModel
folder: FRAMES_FOLDER
nameFilters: ["*.json"]
}
Component {
id: fileDelegate
Text {
text: fileName
font.pointSize: 36
MouseArea {
anchors.fill: parent
onClicked: root.loadFile(folderModel.folder + "/" + fileName);
}
}
}
model: folderModel
delegate: fileDelegate
}
signal loadFile(string filename);
}

View file

@ -0,0 +1,17 @@
<?xml version="1.0" encoding="utf-8"?>
<!--suppress AndroidUnknownAttribute -->
<vector xmlns:api24="http://schemas.android.com/apk/res/android" xmlns:android="http://schemas.android.com/apk/res/android"
android:viewportWidth="192"
android:viewportHeight="192"
android:width="192dp"
android:height="192dp">
<path
android:pathData="M189.5 96.5A93.5 93.5 0 0 1 96 190 93.5 93.5 0 0 1 2.5 96.5 93.5 93.5 0 0 1 96 3 93.5 93.5 0 0 1 189.5 96.5Z"
android:fillColor="#333333" />
<path
android:pathData="M96.2 173.1c-10.3 0 -20.4 -2.1 -29.8 -6 -9.2 -3.8 -17.3 -9.4 -24.3 -16.4 -7 -7 -12.6 -15.2 -16.4 -24.3 -4.1 -9.6 -6.2 -19.6 -6.2 -30 0 -10.3 2.1 -20.4 6 -29.8 3.8 -9.2 9.4 -17.3 16.4 -24.3 7 -7 15.2 -12.6 24.3 -16.4 9.5 -4 19.5 -6 29.8 -6 10.3 0 20.4 2.1 29.8 6 9.2 3.8 17.3 9.4 24.3 16.4 7 7 12.6 15.2 16.4 24.3 4 9.5 6 19.5 6 29.8 0 10.3 -2.1 20.4 -6 29.8 -3.8 9.2 -9.4 17.3 -16.4 24.3 -7 7 -15.2 12.6 -24.3 16.4 -9.2 4.1 -19.3 6.2 -29.6 6.2zm0 -145.3c-37.8 0 -68.6 30.8 -68.6 68.6 0 37.8 30.8 68.6 68.6 68.6 37.8 0 68.6 -30.8 68.6 -68.6 0 -37.8 -30.8 -68.6 -68.6 -68.6z"
android:fillColor="#00b4f0" />
<path
android:pathData="M119.6 129l0 -53.8c3.4 -1.1 5.8 -4.3 5.8 -8 0 -4.6 -3.8 -8.4 -8.4 -8.4 -4.6 0 -8.4 3.8 -8.4 8.4 0 3.6 2.2 6.6 5.4 7.9l0 25L79 83.8 79 64c3.4 -1.1 5.8 -4.3 5.8 -8 0 -4.6 -3.8 -8.4 -8.4 -8.4 -4.6 0 -8.4 3.8 -8.4 8.4 0 3.6 2.2 6.6 5.4 7.9l0 54.1c-3.1 1.2 -5.4 4.3 -5.4 7.9 0 4.6 3.8 8.4 8.4 8.4 4.6 0 8.4 -3.8 8.4 -8.4 0 -3.7 -2.4 -6.9 -5.8 -8l0 -27.3 35 16.3 0 22.2c-3.1 1.2 -5.4 4.3 -5.4 7.9 0 4.6 3.8 8.4 8.4 8.4 4.6 0 8.4 -3.8 8.4 -8.4 0 -3.8 -2.4 -6.9 -5.8 -8z"
android:fillColor="#00b4f0" />
</vector>

View file

@ -0,0 +1,3 @@
<resources>
<string name="app_name" translatable="false">GPU Frame Player</string>
</resources>

View file

@ -3,3 +3,6 @@ project(':qt').projectDir = new File(settingsDir, 'libraries/qt')
include ':interface'
project(':interface').projectDir = new File(settingsDir, 'apps/interface')
//include ':framePlayer'
//project(':framePlayer').projectDir = new File(settingsDir, 'apps/framePlayer')

View file

@ -7,6 +7,7 @@
#
macro(TARGET_GLAD)
if (ANDROID)
include(SelectLibraryConfigurations)
set(INSTALL_DIR ${HIFI_ANDROID_PRECOMPILED}/glad)
set(GLAD_INCLUDE_DIRS "${INSTALL_DIR}/include")
set(GLAD_LIBRARY_DEBUG ${INSTALL_DIR}/lib/libglad_d.a)
@ -31,8 +32,8 @@ macro(TARGET_GLAD)
set(GLAD_INCLUDE_DIRS ${${GLAD_UPPER}_INCLUDE_DIRS})
set(GLAD_LIBRARY ${${GLAD_UPPER}_LIBRARY})
endif()
target_include_directories(${TARGET_NAME} PUBLIC ${GLAD_INCLUDE_DIRS})
target_link_libraries(${TARGET_NAME} ${GLAD_LIBRARY})
target_link_libraries(${TARGET_NAME} ${GLAD_EXTRA_LIBRARIES})
target_link_libraries(${TARGET_NAME} ${GLAD_EXTRA_LIBRARIES})
endmacro()

View file

@ -6,8 +6,13 @@
# See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
#
macro(TARGET_ZLIB)
# using VCPKG for zlib
find_package(ZLIB REQUIRED)
target_include_directories(${TARGET_NAME} SYSTEM PRIVATE ${ZLIB_INCLUDE_DIRS})
target_link_libraries(${TARGET_NAME} ${ZLIB_LIBRARIES})
if (ANDROID)
# zlib is part of the NDK
target_link_libraries(${TARGET_NAME} z)
else()
# using VCPKG for zlib
find_package(ZLIB REQUIRED)
target_include_directories(${TARGET_NAME} SYSTEM PRIVATE ${ZLIB_INCLUDE_DIRS})
target_link_libraries(${TARGET_NAME} ${ZLIB_LIBRARIES})
endif()
endmacro()

View file

@ -0,0 +1,26 @@
import QtQuick 2.2
import QtQuick.Dialogs 1.1
import Qt.labs.folderlistmodel 2.11
Item {
width: 640
height: 480
ListView {
width: 200; height: 400
FolderListModel {
id: folderModel
folder: "assets:/frames/"
nameFilters: ["*.json"]
}
Component {
id: fileDelegate
Text { text: fileName }
}
model: folderModel
delegate: fileDelegate
}
}

View file

@ -3410,8 +3410,10 @@ void Application::resizeGL() {
auto renderConfig = _graphicsEngine.getRenderEngine()->getConfiguration();
assert(renderConfig);
auto mainView = renderConfig->getConfig("RenderMainView.RenderDeferredTask");
assert(mainView);
mainView->setProperty("resolutionScale", renderResolutionScale);
// mainView can be null if we're rendering in forward mode
if (mainView) {
mainView->setProperty("resolutionScale", renderResolutionScale);
}
displayPlugin->setRenderResolutionScale(renderResolutionScale);
}
@ -3959,6 +3961,19 @@ void Application::keyPressEvent(QKeyEvent* event) {
}
break;
case Qt::Key_G:
if (isShifted && isMeta) {
static const QString HIFI_FRAMES_FOLDER_VAR = "HIFI_FRAMES_FOLDER";
static const QString GPU_FRAME_FOLDER = QProcessEnvironment::systemEnvironment().contains(HIFI_FRAMES_FOLDER_VAR)
? QProcessEnvironment::systemEnvironment().value(HIFI_FRAMES_FOLDER_VAR)
: "hifiFrames";
static QString GPU_FRAME_TEMPLATE = GPU_FRAME_FOLDER + "/{DATE}_{TIME}";
QString fullPath = FileUtils::computeDocumentPath(FileUtils::replaceDateTimeTokens(GPU_FRAME_TEMPLATE));
if (FileUtils::canCreateFile(fullPath)) {
getActiveDisplayPlugin()->captureFrame(fullPath.toStdString());
}
}
break;
case Qt::Key_X:
if (isShifted && isMeta) {
auto offscreenUi = getOffscreenUI();

View file

@ -15,8 +15,10 @@
#include <QtCore/QCoreApplication>
#include <QtCore/QThread>
#include <QtCore/QTimer>
#include <QtCore/QFileInfo>
#include <QtGui/QImage>
#include <QtGui/QImageWriter>
#include <QtGui/QOpenGLFramebufferObject>
#include <NumericalConstants.h>
@ -30,6 +32,7 @@
#include <gl/OffscreenGLCanvas.h>
#include <gpu/Texture.h>
#include <gpu/FrameIO.h>
#include <shaders/Shaders.h>
#include <gpu/gl/GLShared.h>
#include <gpu/gl/GLBackend.h>
@ -465,11 +468,43 @@ void OpenGLDisplayPlugin::submitFrame(const gpu::FramePointer& newFrame) {
});
}
void OpenGLDisplayPlugin::renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer texture, glm::ivec4 viewport, const glm::ivec4 scissor) {
renderFromTexture(batch, texture, viewport, scissor, gpu::FramebufferPointer());
void OpenGLDisplayPlugin::captureFrame(const std::string& filename) const {
withOtherThreadContext([&] {
using namespace gpu;
auto glBackend = const_cast<OpenGLDisplayPlugin&>(*this).getGLBackend();
FramebufferPointer framebuffer{ Framebuffer::create("captureFramebuffer") };
TextureCapturer captureLambda = [&](const std::string& filename, const gpu::TexturePointer& texture, uint16 layer) {
QImage image;
if (texture->getUsageType() == TextureUsageType::STRICT_RESOURCE) {
image = QImage{ 1, 1, QImage::Format_ARGB32 };
auto storedImage = texture->accessStoredMipFace(0, 0);
memcpy(image.bits(), storedImage->data(), image.sizeInBytes());
//if (texture == textureCache->getWhiteTexture()) {
//} else if (texture == textureCache->getBlackTexture()) {
//} else if (texture == textureCache->getBlueTexture()) {
//} else if (texture == textureCache->getGrayTexture()) {
} else {
ivec4 rect = { 0, 0, texture->getWidth(), texture->getHeight() };
framebuffer->setRenderBuffer(0, texture, layer);
glBackend->syncGPUObject(*framebuffer);
image = QImage{ rect.z, rect.w, QImage::Format_ARGB32 };
glBackend->downloadFramebuffer(framebuffer, rect, image);
}
QImageWriter(filename.c_str()).write(image);
};
if (_currentFrame) {
gpu::writeFrame(filename, _currentFrame, captureLambda);
}
});
}
void OpenGLDisplayPlugin::renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer texture, glm::ivec4 viewport, const glm::ivec4 scissor, gpu::FramebufferPointer copyFbo /*=gpu::FramebufferPointer()*/) {
void OpenGLDisplayPlugin::renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer& texture, const glm::ivec4& viewport, const glm::ivec4& scissor) {
renderFromTexture(batch, texture, viewport, scissor, nullptr);
}
void OpenGLDisplayPlugin::renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer& texture, const glm::ivec4& viewport, const glm::ivec4& scissor, const gpu::FramebufferPointer& copyFbo /*=gpu::FramebufferPointer()*/) {
auto fbo = gpu::FramebufferPointer();
batch.enableStereo(false);
batch.resetViewTransform();

View file

@ -47,7 +47,7 @@ public:
void endSession() override final;
bool eventFilter(QObject* receiver, QEvent* event) override;
bool isDisplayVisible() const override { return true; }
void captureFrame(const std::string& outputName) const override;
void submitFrame(const gpu::FramePointer& newFrame) override;
glm::uvec2 getRecommendedRenderSize() const override {
@ -113,8 +113,8 @@ protected:
// Plugin specific functionality to send the composed scene to the output window or device
virtual void internalPresent();
void renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer texture, glm::ivec4 viewport, const glm::ivec4 scissor, gpu::FramebufferPointer fbo);
void renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer texture, glm::ivec4 viewport, const glm::ivec4 scissor);
void renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer& texture, const glm::ivec4& viewport, const glm::ivec4& scissor, const gpu::FramebufferPointer& fbo);
void renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer& texture, const glm::ivec4& viewport, const glm::ivec4& scissor);
virtual void updateFrameData();
virtual glm::mat4 getViewCorrection() { return glm::mat4(); }

View file

@ -69,7 +69,10 @@ bool DebugHmdDisplayPlugin::internalActivate() {
_eyeInverseProjections[1] = glm::inverse(_eyeProjections[1]);
_eyeOffsets[0][3] = vec4{ -0.0327499993, 0.0, -0.0149999997, 1.0 };
_eyeOffsets[1][3] = vec4{ 0.0327499993, 0.0, -0.0149999997, 1.0 };
_renderTargetSize = { 3024, 1680 };
// Test HMD per-eye resolution
_renderTargetSize = uvec2{ 1214 * 2 , 1344 };
// uncomment to capture a quarter size frame
//_renderTargetSize /= 2;
_cullingProjection = _eyeProjections[0];
// This must come after the initialization, so that the values calculated
// above are available during the customizeContext call (when not running

View file

@ -118,6 +118,8 @@ void gl::setSwapInterval(int interval) {
wglSwapIntervalEXT(interval);
#elif defined(Q_OS_MAC)
CGLSetParameter(CGLGetCurrentContext(), kCGLCPSwapInterval, &interval);
#elif defined(Q_OS_ANDROID)
eglSwapInterval(eglGetCurrentDisplay(), interval);
#else
Q_UNUSED(interval);
#endif

View file

@ -34,22 +34,9 @@ bool Context::USE_CUSTOM_CONTEXT { true };
#endif
bool Context::enableDebugLogger() {
#if defined(Q_OS_MAC)
// OSX does not support GL_KHR_debug or GL_ARB_debug_output
return false;
#else
#if defined(DEBUG) || defined(USE_GLES)
static bool enableDebugLogger = true;
#else
static const QString DEBUG_FLAG("HIFI_DEBUG_OPENGL");
static bool enableDebugLogger = QProcessEnvironment::systemEnvironment().contains(DEBUG_FLAG);
#endif
return enableDebugLogger;
#endif
return gl::debugContextEnabled();
}
std::atomic<size_t> Context::_totalSwapchainMemoryUsage { 0 };
size_t Context::getSwapchainMemoryUsage() { return _totalSwapchainMemoryUsage.load(); }

View file

@ -52,6 +52,11 @@ void Context::moveToThread(QThread* thread) {
}
void Context::debugMessageHandler(const QOpenGLDebugMessage& debugMessage) {
auto type = debugMessage.type();
if (type == QOpenGLDebugMessage::PerformanceType) {
return;
}
auto severity = debugMessage.severity();
switch (severity) {
case QOpenGLDebugMessage::NotificationSeverity:
@ -60,13 +65,13 @@ void Context::debugMessageHandler(const QOpenGLDebugMessage& debugMessage) {
default:
break;
}
qDebug(glLogging) << debugMessage;
qWarning(glLogging) << debugMessage;
return;
}
void Context::setupDebugLogging(QOpenGLContext *context) {
QOpenGLDebugLogger *logger = new QOpenGLDebugLogger(context);
QObject::connect(logger, &QOpenGLDebugLogger::messageLogged, nullptr, [](const QOpenGLDebugMessage& message){
QObject::connect(logger, &QOpenGLDebugLogger::messageLogged, context, [](const QOpenGLDebugMessage& message){
Context::debugMessageHandler(message);
});
if (logger->initialize()) {

View file

@ -198,11 +198,48 @@ namespace gl {
bool checkGLErrorDebug(const char* name) {
#ifdef DEBUG
// Disabling error checking macro on Android debug builds for now,
// as it throws off performance testing, which must be done on
// Debug builds
#if defined(DEBUG) && !defined(Q_OS_ANDROID)
return checkGLError(name);
#else
Q_UNUSED(name);
return false;
#endif
}
// Enables annotation of captures made by tools like renderdoc
bool khrDebugEnabled() {
static std::once_flag once;
static bool khrDebug = false;
std::call_once(once, [&] {
khrDebug = nullptr != glPushDebugGroupKHR;
});
return khrDebug;
}
// Enables annotation of captures made by tools like renderdoc
bool extDebugMarkerEnabled() {
static std::once_flag once;
static bool extMarker = false;
std::call_once(once, [&] {
extMarker = nullptr != glPushGroupMarkerEXT;
});
return extMarker;
}
bool debugContextEnabled() {
#if defined(Q_OS_MAC)
// OSX does not support GL_KHR_debug or GL_ARB_debug_output
static bool enableDebugLogger = false;
#elif defined(DEBUG) || defined(USE_GLES)
//static bool enableDebugLogger = true;
static bool enableDebugLogger = false;
#else
static const QString DEBUG_FLAG("HIFI_DEBUG_OPENGL");
static bool enableDebugLogger = QProcessEnvironment::systemEnvironment().contains(DEBUG_FLAG);
#endif
return enableDebugLogger;
}
}

View file

@ -37,7 +37,13 @@ bool isRenderThread();
namespace gl {
void globalLock();
void globalRelease(bool finish = true);
bool debugContextEnabled();
bool khrDebugEnabled();
bool extDebugMarkerEnabled();
void withSavedContext(const std::function<void()>& f);
bool checkGLError(const char* name);

View file

@ -392,8 +392,38 @@ void GLBackend::renderPassDraw(const Batch& batch) {
}
}
// Support annotating captures in tools like Renderdoc
class GlDuration {
public:
#ifdef USE_GLES
GlDuration(const char* name) {
// We need to use strlen here instead of -1, because the Snapdragon profiler
// will crash otherwise
glPushDebugGroup(GL_DEBUG_SOURCE_APPLICATION, 0, strlen(name), name);
}
~GlDuration() {
glPopDebugGroup();
}
#else
GlDuration(const char* name) {
if (::gl::khrDebugEnabled()) {
glPushDebugGroupKHR(GL_DEBUG_SOURCE_APPLICATION_KHR, 0, -1, name);
}
}
~GlDuration() {
if (::gl::khrDebugEnabled()) {
glPopDebugGroupKHR();
}
}
#endif
};
#define GL_PROFILE_RANGE(category, name) \
PROFILE_RANGE(category, name); \
GlDuration glProfileRangeThis(name);
void GLBackend::render(const Batch& batch) {
PROFILE_RANGE(render_gpu_gl, batch.getName());
GL_PROFILE_RANGE(render_gpu_gl, batch.getName().c_str());
_transform._skybox = _stereo._skybox = batch.isSkyboxEnabled();
// Allow the batch to override the rendering stereo settings
@ -406,7 +436,7 @@ void GLBackend::render(const Batch& batch) {
_transform._projectionJitter = Vec2(0.0f, 0.0f);
{
PROFILE_RANGE(render_gpu_gl_detail, "Transfer");
GL_PROFILE_RANGE(render_gpu_gl_detail, "Transfer");
renderPassTransfer(batch);
}
@ -416,7 +446,7 @@ void GLBackend::render(const Batch& batch) {
}
#endif
{
PROFILE_RANGE(render_gpu_gl_detail, _stereo.isStereo() ? "Render Stereo" : "Render");
GL_PROFILE_RANGE(render_gpu_gl_detail, _stereo.isStereo() ? "Render Stereo" : "Render");
renderPassDraw(batch);
}
#ifdef GPU_STEREO_DRAWCALL_INSTANCED
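
GL_PROFILE_RANGE pairs the existing CPU-side PROFILE_RANGE marker with a GL debug group (KHR_debug on desktop, glPushDebugGroup on GLES), so capture tools such as RenderDoc or the Snapdragon Profiler show the enclosed calls as a named range. A minimal sketch of annotating another GL code path in the same translation unit the same way; blitUiLayer and its parameters are illustrative, not part of this commit:

// Annotate a simple framebuffer blit so it appears as a named range in GPU captures.
static void blitUiLayer(uint32_t uiFbo, int width, int height) {
    GL_PROFILE_RANGE(render_gpu_gl_detail, "blitUiLayer");
    glBindFramebuffer(GL_READ_FRAMEBUFFER, uiFbo);
    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
    glBlitFramebuffer(0, 0, width, height, 0, 0, width, height, GL_COLOR_BUFFER_BIT, GL_NEAREST);
    glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
    // The GlDuration destructor pops the debug group when this scope exits.
}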

View file

@ -4,3 +4,4 @@ setup_hifi_library()
link_hifi_libraries(shared ktx shaders)
target_nsight()
target_json()

View file

@ -45,7 +45,7 @@ size_t Batch::_dataMax{ BATCH_PREALLOCATE_MIN };
size_t Batch::_objectsMax{ BATCH_PREALLOCATE_MIN };
size_t Batch::_drawCallInfosMax{ BATCH_PREALLOCATE_MIN };
Batch::Batch(const char* name) {
Batch::Batch(const std::string& name) {
_name = name;
_commands.reserve(_commandsMax);
_commandOffsets.reserve(_commandOffsetsMax);
@ -64,7 +64,7 @@ Batch::~Batch() {
_drawCallInfosMax = std::max(_drawCallInfos.size(), _drawCallInfosMax);
}
void Batch::setName(const char* name) {
void Batch::setName(const std::string& name) {
_name = name;
}
@ -96,7 +96,7 @@ void Batch::clear() {
_textureTables.clear();
_transforms.clear();
_name = nullptr;
_name.clear();
_invalidModel = true;
_currentModel = Transform();
_drawcallUniform = 0;

View file

@ -89,14 +89,14 @@ public:
void captureDrawCallInfo();
void captureNamedDrawCallInfo(std::string name);
Batch(const char* name = nullptr);
Batch(const std::string& name = "");
// Disallow copy construction and assignement of batches
Batch(const Batch& batch) = delete;
Batch& operator=(const Batch& batch) = delete;
~Batch();
void setName(const char* name);
const char* getName() const { return _name; }
void setName(const std::string& name);
const std::string& getName() const { return _name; }
void clear();
// Batches may need to override the context level stereo settings
@ -440,6 +440,18 @@ public:
};
};
using CommandHandler = std::function<void(Command, const Param*)>;
void forEachCommand(const CommandHandler& handler) const {
size_t count = _commands.size();
for (size_t i = 0; i < count; ++i) {
const auto command = _commands[i];
const auto offset = _commandOffsets[i];
const Param* params = _params.data() + offset;
handler(command, params);
}
}
typedef Cache<BufferPointer>::Vector BufferCaches;
typedef Cache<TexturePointer>::Vector TextureCaches;
typedef Cache<TextureTablePointer>::Vector TextureTableCaches;
@ -519,7 +531,7 @@ public:
bool _enableSkybox { false };
protected:
const char* _name;
std::string _name;
friend class Context;
friend class Frame;
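
The new Batch::forEachCommand hook is what lets an external visitor, such as the frame serializer, walk a recorded batch without exposing the command and parameter vectors directly. A minimal sketch of the kind of traversal it enables, assuming the COMMAND_NAMES table from FrameIOKeys.h is visible; tallyCommands is an illustrative name, and the cast is added defensively in case Batch::Command does not convert to an index implicitly:

#include <cstddef>
#include <map>
#include <string>

#include "Batch.h"
#include "FrameIOKeys.h"

// Tally how many times each command appears in a recorded batch, e.g. to report
// what a captured frame contains before serializing it.
std::map<std::string, size_t> tallyCommands(const gpu::Batch& batch) {
    std::map<std::string, size_t> counts;
    batch.forEachCommand([&](gpu::Batch::Command command, const gpu::Batch::Param*) {
        counts[gpu::keys::COMMAND_NAMES[static_cast<size_t>(command)]]++;
    });
    return counts;
}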

View file

@ -149,7 +149,8 @@ protected:
Size _end{ 0 };
Sysmem _sysmem;
friend class Serializer;
friend class Deserializer;
friend class BufferView;
friend class Frame;
friend class Batch;

View file

@ -47,10 +47,7 @@ Context::Context(const Context& context) {
}
Context::~Context() {
for (auto batch : _batchPool) {
delete batch;
}
_batchPool.clear();
clearBatches();
_syncedPrograms.clear();
}
@ -97,6 +94,12 @@ FramePointer Context::endFrame() {
return result;
}
void Context::executeBatch(const char* name, std::function<void(Batch&)> lambda) const {
auto batch = acquireBatch(name);
lambda(*batch);
executeBatch(*batch);
}
void Context::executeBatch(Batch& batch) const {
PROFILE_RANGE(render_gpu, __FUNCTION__);
batch.flush();
@ -117,28 +120,27 @@ void Context::executeFrame(const FramePointer& frame) const {
PROFILE_RANGE(render_gpu, __FUNCTION__);
// Grab the stats around the frame and take the delta to get a consistent sampling
ContextStats beginStats;
static ContextStats beginStats;
getStats(beginStats);
// FIXME? probably not necessary, but safe
consumeFrameUpdates(frame);
_backend->setStereoState(frame->stereoState);
{
Batch beginBatch("Context::executeFrame::begin");
_frameRangeTimer->begin(beginBatch);
_backend->render(beginBatch);
// Execute the frame rendering commands
for (auto& batch : frame->batches) {
_backend->render(*batch);
}
Batch endBatch("Context::executeFrame::end");
_frameRangeTimer->end(endBatch);
_backend->render(endBatch);
executeBatch("Context::executeFrame::begin", [&](Batch& batch){
batch.pushProfileRange("Frame");
_frameRangeTimer->begin(batch);
});
// Execute the frame rendering commands
for (auto& batch : frame->batches) {
_backend->render(*batch);
}
executeBatch("Context::executeFrame::end", [&](Batch& batch){
batch.popProfileRange();
_frameRangeTimer->end(batch);
});
ContextStats endStats;
static ContextStats endStats;
getStats(endStats);
_frameStats.evalDelta(beginStats, endStats);
}
@ -381,6 +383,16 @@ void Context::processProgramsToSync() {
}
}
std::mutex Context::_batchPoolMutex;
std::list<Batch*> Context::_batchPool;
void Context::clearBatches() {
for (auto batch : _batchPool) {
delete batch;
}
_batchPool.clear();
}
BatchPointer Context::acquireBatch(const char* name) {
Batch* rawBatch = nullptr;
{
@ -393,8 +405,10 @@ BatchPointer Context::acquireBatch(const char* name) {
if (!rawBatch) {
rawBatch = new Batch();
}
rawBatch->setName(name);
return BatchPointer(rawBatch, [this](Batch* batch) { releaseBatch(batch); });
if (name) {
rawBatch->setName(name);
}
return BatchPointer(rawBatch, [](Batch* batch) { releaseBatch(batch); });
}
void Context::releaseBatch(Batch* batch) {
@ -406,7 +420,7 @@ void Context::releaseBatch(Batch* batch) {
void gpu::doInBatch(const char* name,
const std::shared_ptr<gpu::Context>& context,
const std::function<void(Batch& batch)>& f) {
auto batch = context->acquireBatch(name);
auto batch = Context::acquireBatch(name);
f(*batch);
context->appendFrameBatch(batch);
}
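
Because acquireBatch/releaseBatch are now static, gpu::doInBatch pulls a pooled batch without going through the context instance and only uses the context to append the recorded batch to the current frame; Context::executeBatch(name, lambda) is the immediate-mode counterpart. A minimal sketch of both, assuming a live gpu::ContextPointer named gpuContext (an illustrative name, not part of this change):

#include <gpu/Context.h>

void recordAndRunExample(const gpu::ContextPointer& gpuContext) {
    // Deferred: the batch is appended to the context's current frame and
    // executed later when the frame is rendered by the backend.
    gpu::doInBatch("example::deferred", gpuContext, [](gpu::Batch& batch) {
        batch.enableStereo(false);
        batch.resetViewTransform();
    });

    // Immediate: the batch is flushed and handed to the backend right away
    // (rendering thread only, per the comments in Context.h).
    gpuContext->executeBatch("example::immediate", [](gpu::Batch& batch) {
        batch.resetViewTransform();
    });
}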

View file

@ -308,8 +308,8 @@ public:
void appendFrameBatch(const BatchPointer& batch);
FramePointer endFrame();
BatchPointer acquireBatch(const char* name = nullptr);
void releaseBatch(Batch* batch);
static BatchPointer acquireBatch(const char* name = nullptr);
static void releaseBatch(Batch* batch);
// MUST only be called on the rendering thread
//
@ -321,6 +321,11 @@ public:
// Execute a batch immediately, rather than as part of a frame
void executeBatch(Batch& batch) const;
// MUST only be called on the rendering thread
//
// Execute a batch immediately, rather than as part of a frame
void executeBatch(const char* name, std::function<void(Batch&)> lambda) const;
// MUST only be called on the rendering thread
//
// Executes a frame, applying any updates contained in the frame batches to the rendering
@ -413,8 +418,6 @@ protected:
Context(const Context& context);
std::shared_ptr<Backend> _backend;
std::mutex _batchPoolMutex;
std::list<Batch*> _batchPool;
bool _frameActive{ false };
FramePointer _currentFrame;
RangeTimerPointer _frameRangeTimer;
@ -431,6 +434,11 @@ protected:
static CreateBackend _createBackendCallback;
static std::once_flag _initialized;
// Should probably move this functionality to Batch
static void clearBatches();
static std::mutex _batchPoolMutex;
static std::list<Batch*> _batchPool;
friend class Shader;
friend class Backend;
};

View file

@ -322,7 +322,7 @@ public:
uint8 getLocationScalarCount() const { return DIMENSION_SCALAR_COUNT_PER_LOCATION[(Dimension)_dimension]; }
uint32 getLocationSize() const { return DIMENSION_SCALAR_COUNT_PER_LOCATION[_dimension] * TYPE_SIZE[_type]; }
uint16 getRaw() const { return *((uint16*) (this)); }
uint16 getRaw() const { return *((const uint16*) (this)); }
bool operator ==(const Element& right) const {

View file

@ -96,6 +96,7 @@ namespace gpu {
using TextureTablePointer = std::shared_ptr<TextureTable>;
struct StereoState {
StereoState() {}
bool isStereo() const {
return _enable && !_contextDisable;
}
@ -108,6 +109,9 @@ namespace gpu {
Mat4 _eyeProjections[2];
};
class Serializer;
class Deserializer;
class GPUObject {
public:
virtual ~GPUObject() = default;

View file

@ -42,6 +42,8 @@ namespace gpu {
FramebufferRecycler framebufferRecycler;
protected:
friend class Deserializer;
// Should be called once per frame, on the recording thread
void finish();
void preRender();

View file

@ -0,0 +1,29 @@
//
// Created by Bradley Austin Davis on 2018/10/14
// Copyright 2013-2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#pragma once
#ifndef hifi_gpu_FrameIO_h
#define hifi_gpu_FrameIO_h
#include "Forward.h"
#include "Format.h"
#include <functional>
namespace gpu {
using TextureCapturer = std::function<void(const std::string&, const TexturePointer&, uint16 layer)>;
using TextureLoader = std::function<void(const std::string&, const TexturePointer&, uint16 layer)>;
void writeFrame(const std::string& filename, const FramePointer& frame, const TextureCapturer& capturer = nullptr);
FramePointer readFrame(const std::string& filename, uint32_t externalTexture, const TextureLoader& loader = nullptr);
using IndexOptimizer = std::function<void(Primitive, uint32_t faceCount, uint32_t indexCount, uint32_t* indices )>;
void optimizeFrame(const std::string& filename, const IndexOptimizer& optimizer);
} // namespace gpu
#endif
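
Taken together, the capture path (OpenGLDisplayPlugin::captureFrame calling gpu::writeFrame) and the playback path (PlayerWindow::loadFile calling gpu::readFrame) form a round trip over this API. A minimal sketch of driving it directly; roundTripFrame and "capture.json" are illustrative names, not part of this commit:

#include <gpu/Frame.h>
#include <gpu/FrameIO.h>

// Illustrative round trip: serialize a frame to disk, then read it back for replay.
void roundTripFrame(const gpu::FramePointer& frame, uint32_t externalTexture) {
    // Write the frame description to "capture.json"; passing no TextureCapturer
    // skips dumping texture contents alongside it.
    gpu::writeFrame("capture.json", frame);

    // Read it back; the loader callback decides how each texture's pixels are
    // restored, as PlayerWindow's textureLoader does above.
    gpu::FramePointer replayed = gpu::readFrame(
        "capture.json", externalTexture,
        [](const std::string& filename, const gpu::TexturePointer& texture, gpu::uint16 layer) {
            // A real loader would decode `filename` and call texture->assignStoredMip(...).
        });
    (void)replayed;
}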

View file

@ -0,0 +1,214 @@
//
// Created by Bradley Austin Davis on 2018/10/14
// Copyright 2013-2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#pragma once
#ifndef hifi_gpu_FrameIOKeys_h
#define hifi_gpu_FrameIOKeys_h
namespace gpu { namespace keys {
static const char* binary = "binary";
static const char* L00 = "L00";
static const char* L1m1 = "L1m1";
static const char* L10 = "L10";
static const char* L11 = "L11";
static const char* L2m2 = "L2m2";
static const char* L2m1 = "L2m1";
static const char* L20 = "L20";
static const char* L21 = "L21";
static const char* L22 = "L22";
static const char* eyeProjections = "eyeProjections";
static const char* eyeViews = "eyeViews";
static const char* alphaToCoverageEnable = "alphaToCoverageEnable";
static const char* antialisedLineEnable = "antialisedLineEnable";
static const char* attributes = "attributes";
static const char* batches = "batches";
static const char* blendFunction = "blendFunction";
static const char* borderColor = "borderColor";
static const char* bufferMask = "bufferMask";
static const char* buffers = "buffers";
static const char* capturedTextures = "capturedTextures";
static const char* channel = "channel";
static const char* colorAttachments = "colorAttachments";
static const char* colorWriteMask = "colorWriteMask";
static const char* commands = "commands";
static const char* comparisonFunction = "comparisonFunction";
static const char* cullMode = "cullMode";
static const char* data = "data";
static const char* depth = "depth";
static const char* depthBias = "depthBias";
static const char* depthBiasSlopeScale = "depthBiasSlopeScale";
static const char* depthClampEnable = "depthClampEnable";
static const char* depthStencilAttachment = "depthStencilAttachment";
static const char* depthTest = "depthTest";
static const char* drawCallInfos = "drawCallInfos";
static const char* drawcallUniform = "drawcallUniform";
static const char* drawcallUniformReset = "drawcallUniformReset";
static const char* element = "element";
static const char* fillMode = "fillMode";
static const char* filter = "filter";
static const char* formats = "formats";
static const char* frameIndex = "frameIndex";
static const char* framebuffer = "framebuffer";
static const char* framebuffers = "framebuffers";
static const char* frequency = "frequency";
static const char* frontFaceClockwise = "frontFaceClockwise";
static const char* height = "height";
static const char* id = "id";
static const char* ktxFile = "ktxFile";
static const char* layers = "layers";
static const char* maxAnisotropy = "maxAnisotropy";
static const char* maxMip = "maxMip";
static const char* minMip = "minMip";
static const char* mipOffset = "mipOffset";
static const char* mips = "mips";
static const char* multisampleEnable = "multisampleEnable";
static const char* name = "name";
static const char* namedData = "namedData";
static const char* names = "names";
static const char* objects = "objects";
static const char* offset = "offset";
static const char* pipelines = "pipelines";
static const char* pose = "pose";
static const char* profileRanges = "profileRanges";
static const char* program = "program";
static const char* programs = "programs";
static const char* projectionJitter = "projectionJitter";
static const char* queries = "queries";
static const char* sampleCount = "sampleCount";
static const char* sampleMask = "sampleMask";
static const char* sampler = "sampler";
static const char* samples = "samples";
static const char* scissorEnable = "scissorEnable";
static const char* shaders = "shaders";
static const char* size = "size";
static const char* skybox = "skybox";
static const char* slot = "slot";
static const char* source = "source";
static const char* state = "state";
static const char* stencilActivation = "stencilActivation";
static const char* stencilTestBack = "stencilTestBack";
static const char* stencilTestFront = "stencilTestFront";
static const char* stereo = "stereo";
static const char* subresource = "subresource";
static const char* swapchains = "swapchains";
static const char* texelFormat = "texelFormat";
static const char* texture = "texture";
static const char* textureTables = "textureTables";
static const char* textures = "textures";
static const char* transforms = "transforms";
static const char* type = "type";
static const char* usageType = "usageType";
static const char* view = "view";
static const char* width = "width";
static const char* wrapModeU = "wrapModeU";
static const char* wrapModeV = "wrapModeV";
static const char* wrapModeW = "wrapModeW";
static const char* backWriteMask = "backWriteMask";
static const char* frontWriteMask = "frontWriteMask";
static const char* reference = "reference";
static const char* readMask = "readMask";
static const char* failOp = "failOp";
static const char* depthFailOp = "depthFailOp";
static const char* passOp = "passOp";
static const char* enabled = "enabled";
static const char* blend = "blend";
static const char* flags = "flags";
static const char* writeMask = "writeMask";
static const char* function = "function";
static const char* sourceColor = "sourceColor";
static const char* sourceAlpha = "sourceAlpha";
static const char* destColor = "destColor";
static const char* destAlpha = "destAlpha";
static const char* opColor = "opColor";
static const char* opAlpha = "opAlpha";
static const char* enable = "enable";
static const char* contextDisable = "contextDisable";
static const char* COMMAND_NAMES[] = {
"draw",
"drawIndexed",
"drawInstanced",
"drawIndexedInstanced",
"multiDrawIndirect",
"multiDrawIndexedIndirect",
"setInputFormat",
"setInputBuffer",
"setIndexBuffer",
"setIndirectBuffer",
"setModelTransform",
"setViewTransform",
"setProjectionTransform",
"setProjectionJitter",
"setViewportTransform",
"setDepthRangeTransform",
"setPipeline",
"setStateBlendFactor",
"setStateScissorRect",
"setUniformBuffer",
"setResourceBuffer",
"setResourceTexture",
"setResourceTextureTable",
"setResourceFramebufferSwapChainTexture",
"setFramebuffer",
"setFramebufferSwapChain",
"clearFramebuffer",
"blit",
"generateTextureMips",
"generateTextureMipsWithPipeline",
"advance",
"beginQuery",
"endQuery",
"getQuery",
"resetStages",
"disableContextViewCorrection",
"restoreContextViewCorrection",
"disableContextStereo",
"restoreContextStereo",
"runLambda",
"startNamedCall",
"stopNamedCall",
"glUniform1i",
"glUniform1f",
"glUniform2f",
"glUniform3f",
"glUniform4f",
"glUniform3fv",
"glUniform4fv",
"glUniform4iv",
"glUniformMatrix3fv",
"glUniformMatrix4fv",
"glColor4f",
"pushProfileRange",
"popProfileRange",
};
template<class T, size_t N>
constexpr size_t array_size(T (&)[N]) { return N; }
static_assert(array_size(COMMAND_NAMES) == Batch::Command::NUM_COMMANDS, "Command array sizes must match");
}} // namespace gpu::keys
#endif

View file

@ -0,0 +1,906 @@
//
// Created by Bradley Austin Davis on 2018/10/14
// Copyright 2013-2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "FrameIO.h"
#include <nlohmann/json.hpp>
#include <unordered_map>
#include <QtCore/QFileInfo>
#include <QtCore/QDir>
#include <ktx/KTX.h>
#include "Frame.h"
#include "Batch.h"
#include "TextureTable.h"
#include "FrameIOKeys.h"
namespace gpu {
using json = nlohmann::json;
class Deserializer {
public:
static std::string getBaseName(const std::string& filename) {
static const std::string ext{ ".json" };
if (std::string::npos != filename.rfind(ext)) {
return filename.substr(0, filename.size() - ext.size());
}
return filename;
}
Deserializer(const std::string& filename, uint32_t externalTexture, const TextureLoader& loader) :
basename(getBaseName(filename)), externalTexture(externalTexture), textureLoader(loader) {}
const std::string basename;
std::string basedir;
std::string binaryFile;
const uint32_t externalTexture;
TextureLoader textureLoader;
std::vector<ShaderPointer> shaders;
std::vector<ShaderPointer> programs;
std::vector<TexturePointer> textures;
std::vector<TextureTablePointer> textureTables;
std::vector<BufferPointer> buffers;
std::unordered_map<BufferPointer, size_t> bufferOffsets;
std::vector<Stream::FormatPointer> formats;
std::vector<PipelinePointer> pipelines;
std::vector<FramebufferPointer> framebuffers;
std::vector<SwapChainPointer> swapchains;
std::vector<QueryPointer> queries;
json frameNode;
FramePointer readFrame();
void optimizeFrame(const IndexOptimizer& optimizer);
FramePointer deserializeFrame();
void readBuffers(const json& node);
template <typename T>
static std::vector<T> readArray(const json& node, const std::string& name, std::function<T(const json& node)> parser) {
std::vector<T> result;
if (node.count(name)) {
const auto& sourceArrayNode = node[name];
result.reserve(sourceArrayNode.size());
for (const auto& sourceNode : sourceArrayNode) {
if (sourceNode.is_null()) {
result.push_back(nullptr);
continue;
}
result.push_back(parser(sourceNode));
}
}
return result;
}
template <typename T>
static std::vector<T> readNumericVector(const json& node) {
auto count = node.size();
std::vector<T> result;
result.resize(count);
for (size_t i = 0; i < count; ++i) {
result[i] = node[i];
}
return result;
}
template <size_t N>
static void readFloatArray(const json& node, float* out) {
for (size_t i = 0; i < N; ++i) {
out[i] = node[i].operator float();
}
}
template <typename T>
static bool readOptionalTransformed(T& dest, const json& node, const std::string& name, std::function<T(const json&)> f) {
if (node.count(name)) {
dest = f(node[name]);
return true;
}
return false;
}
template <typename T>
static bool readOptionalVectorTransformed(std::vector<T>& dest,
const json& node,
const std::string& name,
std::function<T(const json&)> f) {
if (node.count(name)) {
const auto& arrayNode = node[name];
const auto count = arrayNode.size();
dest.reserve(count);
for (size_t i = 0; i < count; ++i) {
dest.emplace_back(f(arrayNode[i]));
}
return true;
}
return false;
}
template <typename T>
static bool readOptionalVector(std::vector<T>& dest, const json& node, const std::string& name) {
return readOptionalVectorTransformed(dest, node, name, [](const json& node) { return node.get<T>(); });
}
template <typename T>
static T defaultNodeTransform(const json& node) {
return node.get<T>();
}
template <typename T, typename TT = T>
static bool readBatchCacheTransformed(typename Batch::Cache<T>::Vector& dest,
const json& node,
const std::string& name,
std::function<TT(const json&)> f = [](const json& node) -> TT {
return node.get<TT>();
}) {
if (node.count(name)) {
const auto& arrayNode = node[name];
for (const auto& entry : arrayNode) {
dest.cache(f(entry));
}
return true;
}
return false;
}
template <typename T>
static bool readPointerCache(typename Batch::Cache<T>::Vector& dest,
const json& node,
const std::string& name,
std::vector<T>& global) {
auto transform = [&](const json& node) -> const T& { return global[node.get<uint32_t>()]; };
return readBatchCacheTransformed<T, const T&>(dest, node, name, transform);
}
template <typename T>
static bool readOptional(T& dest, const json& node, const std::string& name) {
return readOptionalTransformed<T>(dest, node, name, [](const json& child) {
T result = child;
return result;
});
}
SwapChainPointer readSwapchain(const json& node);
ShaderPointer readProgram(const json& node);
PipelinePointer readPipeline(const json& node);
TextureTablePointer readTextureTable(const json& node);
TextureView readTextureView(const json& node);
FramebufferPointer readFramebuffer(const json& node);
BatchPointer readBatch(const json& node);
Batch::NamedBatchData readNamedBatchData(const json& node);
//static StatePointer readState(const json& node);
static QueryPointer readQuery(const json& node);
TexturePointer readTexture(const json& node, uint32_t externalTexture);
static ShaderPointer readShader(const json& node);
static Stream::FormatPointer readFormat(const json& node);
static Element readElement(const json& node);
static Sampler readSampler(const json& node);
static glm::mat4 readMat4(const json& node) {
glm::mat4 m;
if (!node.is_null()) {
readFloatArray<16>(node, &m[0][0]);
}
return m;
}
static glm::vec4 readVec4(const json& node) {
glm::vec4 v;
if (!node.is_null()) {
readFloatArray<4>(node, &v[0]);
}
return v;
}
static glm::vec3 readVec3(const json& node) {
glm::vec3 v;
if (!node.is_null()) {
readFloatArray<3>(node, &v[0]);
}
return v;
}
static glm::vec2 readVec2(const json& node) {
glm::vec2 v;
if (!node.is_null()) {
readFloatArray<2>(node, &v[0]);
}
return v;
}
static Transform readTransform(const json& node) { return Transform{ readMat4(node) }; }
static std::vector<uint8_t> fromBase64(const json& node);
static void readCommand(const json& node, Batch& batch);
};
FramePointer readFrame(const std::string& filename, uint32_t externalTexture, const TextureLoader& loader) {
return Deserializer(filename, externalTexture, loader).readFrame();
}
void optimizeFrame(const std::string& filename, const IndexOptimizer& optimizer) {
return Deserializer(filename, 0, {}).optimizeFrame(optimizer);
}
} // namespace gpu
using namespace gpu;
void Deserializer::readBuffers(const json& buffersNode) {
storage::FileStorage mappedFile(binaryFile.c_str());
const auto mappedSize = mappedFile.size();
const auto* mapped = mappedFile.data();
size_t bufferCount = buffersNode.size();
buffers.reserve(buffersNode.size());
size_t offset = 0;
for (size_t i = 0; i < bufferCount; ++i) {
const auto& bufferNode = buffersNode[i];
if (bufferNode.is_null()) {
buffers.push_back(nullptr);
continue;
}
size_t size = bufferNode;
if (offset + size > mappedSize) {
throw std::runtime_error("read buffer error");
}
buffers.push_back(std::make_shared<Buffer>(size, mapped + offset));
bufferOffsets[buffers.back()] = offset;
offset += size;
}
}
Element Deserializer::readElement(const json& node) {
Element result;
if (!node.is_null()) {
*((uint16*)&result) = node;
}
return result;
}
Sampler Deserializer::readSampler(const json& node) {
Sampler result;
if (!node.is_null()) {
if (node.count(keys::borderColor)) {
result._desc._borderColor = readVec4(node[keys::borderColor]);
}
if (node.count(keys::maxAnisotropy)) {
result._desc._maxAnisotropy = node[keys::maxAnisotropy];
}
if (node.count(keys::wrapModeU)) {
result._desc._wrapModeU = node[keys::wrapModeU];
}
if (node.count(keys::wrapModeV)) {
result._desc._wrapModeV = node[keys::wrapModeV];
}
if (node.count(keys::wrapModeW)) {
result._desc._wrapModeW = node[keys::wrapModeW];
}
if (node.count(keys::filter)) {
result._desc._filter = node[keys::filter];
}
if (node.count(keys::comparisonFunction)) {
result._desc._comparisonFunc = node[keys::comparisonFunction];
}
if (node.count(keys::minMip)) {
result._desc._minMip = node[keys::minMip];
}
if (node.count(keys::maxMip)) {
result._desc._maxMip = node[keys::maxMip];
}
if (node.count(keys::mipOffset)) {
result._desc._mipOffset = node[keys::mipOffset];
}
}
return result;
}
TexturePointer Deserializer::readTexture(const json& node, uint32_t external) {
if (node.is_null()) {
return nullptr;
}
TextureUsageType usageType = node[keys::usageType];
Texture::Type type = node[keys::type];
glm::u16vec4 dims;
dims.x = node[keys::width];
dims.y = node[keys::height];
dims.z = node[keys::depth];
dims.w = node[keys::layers];
uint16 mips = node[keys::mips];
uint16 samples = node[keys::samples];
Element texelFormat = readElement(node[keys::texelFormat]);
Sampler sampler;
readOptionalTransformed<Sampler>(sampler, node, keys::sampler, [](const json& node) { return readSampler(node); });
TexturePointer result;
if (usageType == TextureUsageType::EXTERNAL) {
result = Texture::createExternal([](uint32_t, void*) {});
result->setExternalTexture(external, nullptr);
} else {
result = Texture::create(usageType, type, texelFormat, dims.x, dims.y, dims.z, samples, dims.w, mips, sampler);
}
auto& texture = *result;
readOptional(texture._source, node, keys::source);
std::string ktxFile;
readOptional(ktxFile, node, keys::ktxFile);
if (!ktxFile.empty()) {
if (QFileInfo(ktxFile.c_str()).isRelative()) {
ktxFile = basedir + "/" + ktxFile;
}
texture.setKtxBacking(ktxFile);
}
return result;
}
SwapChainPointer Deserializer::readSwapchain(const json& node) {
if (node.is_null()) {
return nullptr;
}
uint8_t swapChainSize = node[keys::size];
std::vector<FramebufferPointer> swapChainFramebuffers;
const auto& framebuffersNode = node[keys::framebuffers];
swapChainFramebuffers.resize(swapChainSize);
for (uint8_t i = 0; i < swapChainSize; ++i) {
auto index = framebuffersNode[i].get<uint32_t>();
swapChainFramebuffers[i] = framebuffers[index];
}
return std::make_shared<FramebufferSwapChain>(swapChainFramebuffers);
}
ShaderPointer Deserializer::readShader(const json& node) {
if (node.is_null()) {
return nullptr;
}
// FIXME support procedural shaders
Shader::Type type = node[keys::type];
uint32_t id = node[keys::id];
ShaderPointer result;
switch (type) {
//case Shader::Type::GEOMETRY:
// result = Shader::createGeometry(id);
// break;
case Shader::Type::VERTEX:
result = Shader::createVertex(id);
break;
case Shader::Type::FRAGMENT:
result = Shader::createPixel(id);
break;
default:
throw std::runtime_error("not implemented");
}
return result;
}
ShaderPointer Deserializer::readProgram(const json& node) {
if (node.is_null()) {
return nullptr;
}
std::vector<ShaderPointer> programShaders;
programShaders.reserve(node.size());
for (const auto& shaderRef : node) {
uint32_t shaderIndex = shaderRef;
programShaders.push_back(this->shaders[shaderIndex]);
}
// FIXME account for geometry and compute shaders?
return Shader::createProgram(programShaders[0], programShaders[1]);
}
static State::Flags readStateFlags(const json& node) {
State::Flags result;
// Hacky implementation because you can't pass boolean bitfields as references
bool value;
if (Deserializer::readOptional(value, node, keys::alphaToCoverageEnable)) {
result.alphaToCoverageEnable = value;
}
if (Deserializer::readOptional(value, node, keys::frontFaceClockwise)) {
result.frontFaceClockwise = value;
}
if (Deserializer::readOptional(value, node, keys::depthClampEnable)) {
result.depthClampEnable = value;
}
if (Deserializer::readOptional(value, node, keys::scissorEnable)) {
result.scissorEnable = value;
}
if (Deserializer::readOptional(value, node, keys::multisampleEnable)) {
result.multisampleEnable = value;
}
if (Deserializer::readOptional(value, node, keys::antialisedLineEnable)) {
result.antialisedLineEnable = value;
}
return result;
}
static State::BlendFunction readBlendFunction(const json& node) {
State::BlendFunction result;
uint16 enabled;
State::BlendArg blendArg;
State::BlendOp blendOp;
if (Deserializer::readOptional(enabled, node, keys::enabled)) {
result.enabled = enabled;
}
if (Deserializer::readOptional(blendArg, node, keys::sourceColor)) {
result.sourceColor = blendArg;
}
if (Deserializer::readOptional(blendArg, node, keys::sourceAlpha)) {
result.sourceAlpha = blendArg;
}
if (Deserializer::readOptional(blendArg, node, keys::destColor)) {
result.destColor = blendArg;
}
if (Deserializer::readOptional(blendArg, node, keys::destAlpha)) {
result.destAlpha = blendArg;
}
if (Deserializer::readOptional(blendOp, node, keys::opAlpha)) {
result.opAlpha = blendOp;
}
if (Deserializer::readOptional(blendOp, node, keys::opColor)) {
result.opColor = blendOp;
}
return result;
}
static State::DepthTest readDepthTest(const json& node) {
State::DepthTest result;
Deserializer::readOptional(result.writeMask, node, keys::writeMask);
Deserializer::readOptional(result.enabled, node, keys::enabled);
Deserializer::readOptional(result.function, node, keys::function);
return result;
}
static State::StencilTest readStencilTest(const json& node) {
State::StencilTest result;
State::ComparisonFunction compareOp;
State::StencilOp stencilOp;
if (Deserializer::readOptional(compareOp, node, keys::function)) {
result.function = compareOp;
}
if (Deserializer::readOptional(stencilOp, node, keys::failOp)) {
result.failOp = stencilOp;
}
if (Deserializer::readOptional(stencilOp, node, keys::depthFailOp)) {
result.depthFailOp = stencilOp;
}
if (Deserializer::readOptional(stencilOp, node, keys::passOp)) {
result.passOp = stencilOp;
}
Deserializer::readOptional(result.reference, node, keys::reference);
Deserializer::readOptional(result.readMask, node, keys::readMask);
return result;
}
static State::StencilActivation readStencilActivation(const json& node) {
State::StencilActivation result;
bool enabled;
if (Deserializer::readOptional(enabled, node, keys::enabled)) {
result.enabled = enabled;
}
Deserializer::readOptional(result.frontWriteMask, node, keys::frontWriteMask);
Deserializer::readOptional(result.backWriteMask, node, keys::backWriteMask);
return result;
}
StatePointer readState(const json& node) {
if (node.is_null()) {
return nullptr;
}
State::Data data;
Deserializer::readOptionalTransformed<State::Flags>(data.flags, node, keys::flags, &readStateFlags);
Deserializer::readOptionalTransformed<State::BlendFunction>(data.blendFunction, node, keys::blendFunction, &readBlendFunction);
Deserializer::readOptionalTransformed<State::DepthTest>(data.depthTest, node, keys::depthTest, &readDepthTest);
Deserializer::readOptionalTransformed<State::StencilActivation>(data.stencilActivation, node, keys::stencilActivation, &readStencilActivation);
Deserializer::readOptionalTransformed<State::StencilTest>(data.stencilTestFront, node, keys::stencilTestFront, &readStencilTest);
Deserializer::readOptionalTransformed<State::StencilTest>(data.stencilTestBack, node, keys::stencilTestBack, &readStencilTest);
Deserializer::readOptional(data.colorWriteMask, node, keys::colorWriteMask);
Deserializer::readOptional(data.cullMode, node, keys::cullMode);
Deserializer::readOptional(data.depthBias, node, keys::depthBias);
Deserializer::readOptional(data.depthBiasSlopeScale, node, keys::depthBiasSlopeScale);
Deserializer::readOptional(data.fillMode, node, keys::fillMode);
Deserializer::readOptional(data.sampleMask, node, keys::sampleMask);
return std::make_shared<State>(data);
}
PipelinePointer Deserializer::readPipeline(const json& node) {
if (node.is_null()) {
return nullptr;
}
auto state = readState(node[keys::state]);
uint32_t programIndex = node[keys::program];
auto program = programs[programIndex];
return Pipeline::create(program, state);
}
Stream::FormatPointer Deserializer::readFormat(const json& node) {
if (node.is_null()) {
return nullptr;
}
auto result = std::make_shared<Stream::Format>();
auto& format = *result;
const auto& attributesNode = node[keys::attributes];
for (const auto& attributeNode : attributesNode) {
uint8_t slot = attributeNode[keys::slot];
auto& attribute = format._attributes[slot];
attribute._slot = slot;
attribute._channel = attributeNode[keys::channel];
readOptionalTransformed<Element>(attribute._element, attributeNode, keys::element,
[](const json& node) { return readElement(node); });
readOptional(attribute._frequency, attributeNode, keys::frequency);
readOptional(attribute._offset, attributeNode, keys::offset);
}
format.evaluateCache();
return result;
}
TextureTablePointer Deserializer::readTextureTable(const json& node) {
if (node.is_null()) {
return nullptr;
}
TextureTablePointer result = std::make_shared<TextureTable>();
auto& table = *result;
auto count = node.size();
for (size_t i = 0; i < count; ++i) {
uint32_t index = node[i];
table.setTexture(i, textures[index]);
}
return result;
}
TextureView Deserializer::readTextureView(const json& node) {
TextureView result;
auto texturePointerReader = [this](const json& node) {
uint32_t textureIndex = node;
return textures[textureIndex];
};
readOptionalTransformed<TexturePointer>(result._texture, node, keys::texture, texturePointerReader);
readOptionalTransformed<Element>(result._element, node, keys::element, &readElement);
readOptional(result._subresource, node, keys::subresource);
return result;
}
FramebufferPointer Deserializer::readFramebuffer(const json& node) {
if (node.is_null()) {
return nullptr;
}
FramebufferPointer result;
{
std::string name;
readOptional(name, node, keys::name);
result.reset(Framebuffer::create(name));
}
auto& framebuffer = *result;
readOptional(framebuffer._bufferMask, node, keys::bufferMask);
readOptional(framebuffer._height, node, keys::height);
readOptional(framebuffer._width, node, keys::width);
readOptional(framebuffer._numSamples, node, keys::sampleCount);
auto textureViewReader = [this](const json& node) -> TextureView { return readTextureView(node); };
readOptionalTransformed<TextureView>(framebuffer._depthStencilBuffer, node, keys::depthStencilAttachment,
textureViewReader);
if (framebuffer._depthStencilBuffer) {
framebuffer._depthStamp++;
}
if (node.count(keys::colorAttachments)) {
const auto& colorAttachmentsNode = node[keys::colorAttachments];
size_t count = colorAttachmentsNode.size();
for (size_t i = 0; i < count; ++i) {
const auto& colorAttachmentNode = colorAttachmentsNode[i];
if (colorAttachmentNode.is_null()) {
continue;
}
framebuffer._renderBuffers[i] = readTextureView(colorAttachmentNode);
framebuffer._colorStamps[i]++;
}
}
return result;
}
QueryPointer Deserializer::readQuery(const json& node) {
if (node.is_null()) {
return nullptr;
}
std::string name = node[keys::name];
return std::make_shared<Query>([](const Query&) {}, name);
}
std::vector<uint8_t> Deserializer::fromBase64(const json& node) {
std::vector<uint8_t> result;
auto decoded = QByteArray::fromBase64(QByteArray{ node.get<std::string>().c_str() });
result.resize(decoded.size());
memcpy(result.data(), decoded.data(), decoded.size());
return result;
}
static std::unordered_map<std::string, Batch::Command> getCommandNameMap() {
static std::unordered_map<std::string, Batch::Command> result;
if (result.empty()) {
for (Batch::Command i = Batch::COMMAND_draw; i < Batch::NUM_COMMANDS; i = (Batch::Command)(i + 1)) {
result[keys::COMMAND_NAMES[i]] = i;
}
}
return result;
}
void Deserializer::readCommand(const json& commandNode, Batch& batch) {
size_t count = commandNode.size();
std::string commandName = commandNode[0];
Batch::Command command = getCommandNameMap()[commandName];
batch._commands.push_back(command);
batch._commandOffsets.push_back(batch._params.size());
for (size_t i = 1; i < count; ++i) {
batch._params.emplace_back(commandNode[i].get<size_t>());
}
}
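// Illustrative only: a command node is an array whose first element is the command name and whose
// remaining elements are raw parameter words, e.g. ["drawIndexed", 36, 0] (values hypothetical).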
Batch::NamedBatchData Deserializer::readNamedBatchData(const json& node) {
Batch::NamedBatchData result;
readOptionalVectorTransformed<BufferPointer>(result.buffers, node, keys::buffers, [this](const json& node) {
uint32_t index = node;
return buffers[index];
});
readOptionalVectorTransformed<Batch::DrawCallInfo>(result.drawCallInfos, node, keys::drawCallInfos,
[](const json& node) -> Batch::DrawCallInfo {
Batch::DrawCallInfo result{ 0 };
*((uint32_t*)&result) = node;
return result;
});
return result;
}
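// DrawCallInfo entries are reinterpreted to and from a single uint32 here, mirroring writeUintVector
// on the serialization side.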
BatchPointer Deserializer::readBatch(const json& node) {
if (node.is_null()) {
return nullptr;
}
std::string batchName;
if (node.count(keys::name)) {
batchName = node[keys::name];
}
BatchPointer result = std::make_shared<Batch>(batchName);
auto& batch = *result;
readOptional(batch._enableStereo, node, keys::stereo);
readOptional(batch._enableSkybox, node, keys::skybox);
readOptionalTransformed<glm::vec2>(batch._projectionJitter, node, keys::projectionJitter, &readVec2);
readOptional(batch._drawcallUniform, node, keys::drawcallUniform);
readOptional(batch._drawcallUniformReset, node, keys::drawcallUniformReset);
readPointerCache(batch._textures, node, keys::textures, textures);
readPointerCache(batch._textureTables, node, keys::textureTables, textureTables);
readPointerCache(batch._buffers, node, keys::buffers, buffers);
readPointerCache(batch._pipelines, node, keys::pipelines, pipelines);
readPointerCache(batch._streamFormats, node, keys::formats, formats);
readPointerCache(batch._framebuffers, node, keys::framebuffers, framebuffers);
readPointerCache(batch._swapChains, node, keys::swapchains, swapchains);
readPointerCache(batch._queries, node, keys::queries, queries);
readOptionalVectorTransformed<Batch::DrawCallInfo>(batch._drawCallInfos, node, keys::drawCallInfos,
[](const json& node) -> Batch::DrawCallInfo {
Batch::DrawCallInfo result{ 0 };
*((uint32_t*)&result) = node;
return result;
});
readOptionalTransformed<std::vector<uint8_t>>(batch._data, node, keys::data,
[](const json& node) { return fromBase64(node); });
for (const auto& commandNode : node[keys::commands]) {
readCommand(commandNode, batch);
}
readBatchCacheTransformed<Transform, Transform>(batch._transforms, node, keys::transforms, &readTransform);
readBatchCacheTransformed<std::string>(batch._profileRanges, node, keys::profileRanges);
readBatchCacheTransformed<std::string>(batch._names, node, keys::names);
auto objectTransformReader = [](const json& node) -> Batch::TransformObject {
Batch::TransformObject result;
result._model = readMat4(node);
result._modelInverse = glm::inverse(result._model);
return result;
};
readOptionalVectorTransformed<Batch::TransformObject>(batch._objects, node, keys::objects, objectTransformReader);
if (node.count(keys::namedData)) {
const auto& namedDataNode = node[keys::namedData];
for (auto itr = namedDataNode.begin(); itr != namedDataNode.end(); ++itr) {
auto name = itr.key();
batch._namedData[name] = readNamedBatchData(itr.value());
}
}
return result;
}
StereoState readStereoState(const json& node) {
StereoState result;
Deserializer::readOptional(result._enable, node, keys::enable);
Deserializer::readOptional(result._contextDisable, node, keys::contextDisable);
Deserializer::readOptional(result._skybox, node, keys::skybox);
if (node.count(keys::eyeProjections)) {
auto projections = node[keys::eyeProjections];
result._eyeProjections[0] = Deserializer::readMat4(projections[0]);
result._eyeProjections[1] = Deserializer::readMat4(projections[1]);
}
if (node.count(keys::eyeViews)) {
auto views = node[keys::eyeViews];
result._eyeViews[0] = Deserializer::readMat4(views[0]);
result._eyeViews[1] = Deserializer::readMat4(views[1]);
}
return result;
}
FramePointer Deserializer::deserializeFrame() {
{
std::string filename{ basename + ".json" };
if (0 == basename.find("assets:")) {
auto lastSlash = basename.rfind('/');
basedir = basename.substr(0, lastSlash);
} else {
basedir = QFileInfo(basename.c_str()).absolutePath().toStdString();
}
storage::FileStorage mappedFile(filename.c_str());
frameNode = json::parse(std::string((const char*)mappedFile.data(), mappedFile.size()));
}
FramePointer result = std::make_shared<Frame>();
auto& frame = *result;
if (frameNode[keys::binary].is_string()) {
binaryFile = frameNode[keys::binary];
if (QFileInfo(binaryFile.c_str()).isRelative()) {
binaryFile = basedir + "/" + binaryFile;
}
} else {
binaryFile = basename + ".bin";
}
if (frameNode.count(keys::buffers)) {
readBuffers(frameNode[keys::buffers]);
}
shaders = readArray<ShaderPointer>(frameNode, keys::shaders, [](const json& node) { return readShader(node); });
// Must come after shaders
programs = readArray<ShaderPointer>(frameNode, keys::programs, [this](const json& node) { return readProgram(node); });
// Must come after programs
pipelines = readArray<PipelinePointer>(frameNode, keys::pipelines, [this](const json& node) { return readPipeline(node); });
formats = readArray<Stream::FormatPointer>(frameNode, keys::formats, [](const json& node) { return readFormat(node); });
auto textureReader = [this](const json& node) { return readTexture(node, externalTexture); };
textures = readArray<TexturePointer>(frameNode, keys::textures, textureReader);
if (textureLoader) {
std::vector<uint32_t> capturedTextures = readNumericVector<uint32_t>(frameNode[keys::capturedTextures]);
for (const auto& index : capturedTextures) {
const auto& texturePointer = textures[index];
uint16 layers = std::max<uint16>(texturePointer->getNumSlices(), 1);
for (uint16 layer = 0; layer < layers; ++layer) {
std::string filename = basename + "." + std::to_string(index) + "." + std::to_string(layer) + ".png";
textureLoader(filename, texturePointer, layer);
}
}
}
// Must come after textures
auto textureTableReader = [this](const json& node) { return readTextureTable(node); };
textureTables = readArray<TextureTablePointer>(frameNode, keys::textureTables, textureTableReader);
// Must come after textures
auto framebufferReader = [this](const json& node) { return readFramebuffer(node); };
framebuffers = readArray<FramebufferPointer>(frameNode, keys::framebuffers, framebufferReader);
// Must come after textures & framebuffers
swapchains =
readArray<SwapChainPointer>(frameNode, keys::swapchains, [this](const json& node) { return readSwapchain(node); });
queries = readArray<QueryPointer>(frameNode, keys::queries, [this](const json& node) { return readQuery(node); });
frame.framebuffer = framebuffers[frameNode[keys::framebuffer].get<uint32_t>()];
frame.view = readMat4(frameNode[keys::view]);
frame.pose = readMat4(frameNode[keys::pose]);
frame.frameIndex = frameNode[keys::frameIndex];
frame.stereoState = readStereoState(frameNode[keys::stereo]);
if (frameNode.count(keys::batches)) {
for (const auto& batchNode : frameNode[keys::batches]) {
frame.batches.push_back(readBatch(batchNode));
}
}
return result;
}
FramePointer Deserializer::readFrame() {
auto result = deserializeFrame();
result->finish();
return result;
}
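// Hypothetical usage, mirroring the gpu-frame-player tool: load a capture, supplying an external
// texture id and a loader for the per-layer PNGs written at capture time:
//   auto frame = gpu::readFrame("capture.json", externalTextureId,
//       [](const std::string& file, const TexturePointer& texture, uint16_t layer) {
//           /* decode the image at 'file' and upload it to 'texture' */
//       });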
void Deserializer::optimizeFrame(const IndexOptimizer& optimizer) {
auto result = deserializeFrame();
auto& frame = *result;
// optimize the index buffers?
struct CurrentIndexBuffer {
Offset offset{ 0 };
BufferPointer buffer;
Type type{ gpu::Type::INT32 };
Primitive primitve{ Primitive::TRIANGLES };
uint32_t numIndices{ 0 };
uint32_t startIndex{ 0 };
};
std::vector<CurrentIndexBuffer> captured;
for (auto& batch : frame.batches) {
CurrentIndexBuffer currentIndexBuffer;
batch->forEachCommand([&](Batch::Command cmd, const Batch::Param* params){
switch(cmd) {
case Batch::Command::COMMAND_setIndexBuffer:
currentIndexBuffer.offset = params[0]._size;
currentIndexBuffer.buffer = batch->_buffers.get(params[1]._int);
currentIndexBuffer.type = (Type)params[2]._int;
break;
case Batch::Command::COMMAND_drawIndexed:
currentIndexBuffer.startIndex = params[0]._int;
currentIndexBuffer.numIndices = params[1]._int;
currentIndexBuffer.primitve = (Primitive)params[2]._int;
captured.emplace_back(currentIndexBuffer);
break;
case Batch::Command::COMMAND_drawIndexedInstanced:
currentIndexBuffer.startIndex = params[1]._int;
currentIndexBuffer.numIndices = params[2]._int;
currentIndexBuffer.primitve = (Primitive)params[3]._int;
captured.emplace_back(currentIndexBuffer);
break;
default:
break;
}
});
}
std::string optimizedBinaryFile = basename + "_optimized.bin";
QFile(binaryFile.c_str()).copy(optimizedBinaryFile.c_str());
{
storage::FileStorage mappedFile(optimizedBinaryFile.c_str());
std::set<BufferPointer> uniqueBuffers;
for (const auto& capturedIndexData : captured) {
if (uniqueBuffers.count(capturedIndexData.buffer)) {
continue;
}
uniqueBuffers.insert(capturedIndexData.buffer);
auto bufferOffset = bufferOffsets[capturedIndexData.buffer];
auto& buffer = *capturedIndexData.buffer;
const auto& count = capturedIndexData.numIndices;
auto indices = (uint32_t*)buffer.editData();
optimizer(capturedIndexData.primitve, count / 3, count, indices);
memcpy(mappedFile.mutableData() + bufferOffset, indices, sizeof(uint32_t) * count);
}
}
frameNode[keys::binary] = optimizedBinaryFile;
{
std::string frameJson = frameNode.dump();
std::string filename = basename + "_optimized.json";
storage::FileStorage::create(filename.c_str(), frameJson.size(), (const uint8_t*)frameJson.data());
}
}
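// Sketch of an optimizer callback (signature as used above and by the frame-optimizer tool): it may
// rewrite the index array in place; the result is then copied back into the duplicated binary blob.
//   gpu::IndexOptimizer optimizer = [](gpu::Primitive prim, uint32_t faceCount, uint32_t indexCount, uint32_t* indices) {
//       // e.g. reorder 'indices' for post-transform vertex cache locality
//   };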


@ -0,0 +1,844 @@
//
// Created by Bradley Austin Davis on 2018/10/14
// Copyright 2013-2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "FrameIO.h"
#include "Frame.h"
#include "Batch.h"
#include "TextureTable.h"
#include <nlohmann/json.hpp>
#include <unordered_map>
#include "FrameIOKeys.h"
namespace gpu {
using json = nlohmann::json;
class Serializer {
public:
const std::string basename;
const TextureCapturer textureCapturer;
std::unordered_map<ShaderPointer, uint32_t> shaderMap;
std::unordered_map<ShaderPointer, uint32_t> programMap;
std::unordered_map<TexturePointer, uint32_t> textureMap;
std::unordered_map<TextureTablePointer, uint32_t> textureTableMap;
std::unordered_map<BufferPointer, uint32_t> bufferMap;
std::unordered_map<Stream::FormatPointer, uint32_t> formatMap;
std::unordered_map<PipelinePointer, uint32_t> pipelineMap;
std::unordered_map<FramebufferPointer, uint32_t> framebufferMap;
std::unordered_map<SwapChainPointer, uint32_t> swapchainMap;
std::unordered_map<QueryPointer, uint32_t> queryMap;
Serializer(const std::string& basename, const TextureCapturer& capturer) : basename(basename), textureCapturer(capturer) {}
template <typename T>
static uint32_t getGlobalIndex(const T& value, std::unordered_map<T, uint32_t>& map) {
if (map.count(value) == 0) {
uint32_t result = (uint32_t)map.size();
map[value] = result;
return result;
}
return map[value];
}
template <typename T>
static json serializePointerCache(const typename Batch::Cache<T>::Vector& cache, std::unordered_map<T, uint32_t>& map) {
json result = json::array();
const auto count = cache._items.size();
for (uint32_t i = 0; i < count; ++i) {
const auto& cacheEntry = cache._items[i];
const auto& val = cacheEntry._data;
result.push_back(getGlobalIndex(val, map));
}
return result;
}
template <typename T, typename TT = const T&>
static json serializeDataCache(const typename Batch::Cache<T>::Vector& cache,
std::function<TT(const T&)> f = [](const T& t) -> TT { return t; }) {
json result = json::array();
const auto count = cache._items.size();
for (uint32_t i = 0; i < count; ++i) {
const auto& cacheEntry = cache._items[i];
const auto& val = cacheEntry._data;
result.push_back(f(val));
}
return result;
}
template <typename T, typename TT = const T&>
static json writeVector(const std::vector<T>& v,
const std::function<TT(const T&)>& f = [](const T& t) -> TT { return t; }) {
auto node = json::array();
for (const auto& e : v) {
node.push_back(f(e));
}
return node;
}
template <typename T>
static json writeNumericVector(const std::vector<T>& v) {
return writeVector<T, const T&>(v);
}
template <typename T>
static json writeUintVector(const std::vector<T>& v) {
return writeVector<T, const uint32_t&>(v, [](const T& t) -> const uint32_t& {
return reinterpret_cast<const uint32_t&>(t);
});
}
template <size_t N = 1>
static json writeFloatArray(const float* f) {
json result = json::array();
for (size_t i = 0; i < N; ++i) {
result.push_back(f[i]);
}
return result;
}
template <typename T>
static std::vector<T> mapToVector(const std::unordered_map<T, uint32_t>& map) {
std::vector<T> result;
result.resize(map.size());
for (const auto& entry : map) {
if (result[entry.second]) {
throw std::runtime_error("Invalid map");
}
result[entry.second] = entry.first;
}
return result;
}
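// mapToVector inverts the pointer-to-index maps built by getGlobalIndex, so resources are emitted in
// the same order the deserializer will index them; the throw guards against duplicate indices.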
template <typename T, typename F>
std::function<json(const T&)> memberWriter(F f) {
return std::bind(f, this, std::placeholders::_1);
}
void writeFrame(const Frame& frame);
json writeBatch(const Batch& batch);
json writeTextureTable(const TextureTablePointer& textureTable);
json writeTextureView(const TextureView& textureView);
json writeFramebuffer(const FramebufferPointer& texture);
json writePipeline(const PipelinePointer& pipeline);
json writeSwapchain(const SwapChainPointer& swapchain);
json writeProgram(const ShaderPointer& program);
json writeNamedBatchData(const Batch::NamedBatchData& namedData);
json writeCapturableTextures(const Frame& frame);
void writeBinaryBlob();
static std::string toBase64(const std::vector<uint8_t>& v);
static json writeIrradiance(const SHPointer& irradiance);
static json writeMat4(const glm::mat4& m) {
static const glm::mat4 IDENTITY;
if (m == IDENTITY) {
return json();
}
return writeFloatArray<16>(&m[0][0]);
}
static json writeVec4(const glm::vec4& v) { return writeFloatArray<4>(&v[0]); }
static json writeVec3(const glm::vec3& v) { return writeFloatArray<3>(&v[0]); }
static json writeVec2(const glm::vec2& v) { return writeFloatArray<2>(&v[0]); }
static json writeTransform(const Transform& t) { return writeMat4(t.getMatrix()); }
static json writeCommand(size_t index, const Batch& batch);
static json writeSampler(const Sampler& sampler);
static json writeTexture(const TexturePointer& texture);
static json writeFormat(const Stream::FormatPointer& format);
static json writeQuery(const QueryPointer& query);
static json writeShader(const ShaderPointer& shader);
static json writeBuffer(const BufferPointer& bufferPointer);
static const TextureView DEFAULT_TEXTURE_VIEW;
static const Sampler DEFAULT_SAMPLER;
template <typename T, typename F>
void serializeMap(json& frameNode, const char* key, const std::unordered_map<T, uint32_t>& map, F f) {
auto& node = frameNode[key] = json::array();
for (const auto& item : mapToVector(map)) {
node.push_back(f(item));
}
}
};
void writeFrame(const std::string& filename, const FramePointer& frame, const TextureCapturer& capturer) {
Serializer(filename, capturer).writeFrame(*frame);
}
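// Hypothetical usage sketch: capture the current frame together with its renderbuffer contents.
//   gpu::writeFrame("C:/frames/capture", framePointer,
//       [](const std::string& path, const gpu::TexturePointer& texture, uint16_t layer) {
//           /* read back 'texture' at 'layer' and save it as a PNG at 'path' */
//       });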
} // namespace gpu
using namespace gpu;
const TextureView Serializer::DEFAULT_TEXTURE_VIEW = TextureView();
const Sampler Serializer::DEFAULT_SAMPLER = Sampler();
std::string Serializer::toBase64(const std::vector<uint8_t>& v) {
return QByteArray((const char*)v.data(), (int)v.size()).toBase64().toStdString();
}
json Serializer::writeCommand(size_t index, const Batch& batch) {
const auto& command = batch._commands[index];
auto offset = batch._commandOffsets[index];
auto endOffset = batch._params.size();
if ((index + 1) < batch._commands.size()) {
endOffset = batch._commandOffsets[index + 1];
}
json result = json::array();
result.push_back(keys::COMMAND_NAMES[command]);
while (offset < endOffset) {
result.push_back(batch._params[offset]._size);
++offset;
}
return result;
}
json Serializer::writeNamedBatchData(const Batch::NamedBatchData& namedData) {
json result = json::object();
auto& buffersNode = result[keys::buffers] = json::array();
for (const auto& buffer : namedData.buffers) {
buffersNode.push_back(getGlobalIndex(buffer, bufferMap));
}
result[keys::drawCallInfos] = writeUintVector(namedData.drawCallInfos);
return result;
}
json Serializer::writeBatch(const Batch& batch) {
json batchNode;
static const Batch DEFAULT_BATCH;
batchNode[keys::name] = batch.getName();
if (batch._enableSkybox != DEFAULT_BATCH._enableSkybox) {
batchNode[keys::skybox] = batch._enableSkybox;
}
if (batch._enableStereo != DEFAULT_BATCH._enableStereo) {
batchNode[keys::stereo] = batch._enableStereo;
}
if (batch._projectionJitter != DEFAULT_BATCH._projectionJitter) {
batchNode[keys::projectionJitter] = writeVec2(batch._projectionJitter);
}
if (batch._drawcallUniform != DEFAULT_BATCH._drawcallUniform) {
batchNode[keys::drawcallUniform] = batch._drawcallUniform;
}
if (batch._drawcallUniformReset != DEFAULT_BATCH._drawcallUniformReset) {
batchNode[keys::drawcallUniformReset] = batch._drawcallUniformReset;
}
if (0 != batch._textures.size()) {
batchNode[keys::textures] = serializePointerCache(batch._textures, textureMap);
}
if (0 != batch._textureTables.size()) {
batchNode[keys::textureTables] = serializePointerCache(batch._textureTables, textureTableMap);
}
if (0 != batch._buffers.size()) {
batchNode[keys::buffers] = serializePointerCache(batch._buffers, bufferMap);
}
if (0 != batch._pipelines.size()) {
batchNode[keys::pipelines] = serializePointerCache(batch._pipelines, pipelineMap);
}
if (0 != batch._streamFormats.size()) {
batchNode[keys::formats] = serializePointerCache(batch._streamFormats, formatMap);
}
if (0 != batch._framebuffers.size()) {
batchNode[keys::framebuffers] = serializePointerCache(batch._framebuffers, framebufferMap);
}
if (0 != batch._swapChains.size()) {
batchNode[keys::swapchains] = serializePointerCache(batch._swapChains, swapchainMap);
}
if (0 != batch._queries.size()) {
batchNode[keys::queries] = serializePointerCache(batch._queries, queryMap);
}
if (!batch._drawCallInfos.empty()) {
batchNode[keys::drawCallInfos] = writeUintVector(batch._drawCallInfos);
}
if (!batch._data.empty()) {
batchNode[keys::data] = toBase64(batch._data);
}
{
auto& node = batchNode[keys::commands] = json::array();
size_t commandCount = batch._commands.size();
for (size_t i = 0; i < commandCount; ++i) {
node.push_back(writeCommand(i, batch));
}
}
if (0 != batch._transforms.size()) {
batchNode[keys::transforms] =
serializeDataCache<Transform, json>(batch._transforms, [](const Transform& t) { return writeTransform(t); });
}
if (0 != batch._profileRanges.size()) {
batchNode[keys::profileRanges] = serializeDataCache<std::string>(batch._profileRanges);
}
if (0 != batch._names.size()) {
batchNode[keys::names] = serializeDataCache<std::string>(batch._names);
}
if (0 != batch._objects.size()) {
auto transform = [](const Batch::TransformObject& object) -> json { return writeMat4(object._model); };
batchNode[keys::objects] = writeVector<Batch::TransformObject, json>(batch._objects, transform);
}
if (!batch._namedData.empty()) {
auto& namedDataNode = batchNode[keys::namedData] = json::object();
for (const auto& entry : batch._namedData) {
namedDataNode[entry.first] = writeNamedBatchData(entry.second);
}
}
// LambdaCache _lambdas;
return batchNode;
}
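// Each per-batch cache above is written as an array of indices into the frame-global resource tables;
// Deserializer::readBatch resolves them back to pointers via readPointerCache.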
json Serializer::writeSampler(const Sampler& sampler) {
json result = json::object();
if (sampler.getBorderColor() != DEFAULT_SAMPLER.getBorderColor()) {
result[keys::borderColor] = writeVec4(sampler.getBorderColor());
}
if (sampler.getMaxAnisotropy() != DEFAULT_SAMPLER.getMaxAnisotropy()) {
result[keys::maxAnisotropy] = sampler.getMaxAnisotropy();
}
if (sampler.getWrapModeU() != DEFAULT_SAMPLER.getWrapModeU()) {
result[keys::wrapModeU] = sampler.getWrapModeU();
}
if (sampler.getWrapModeV() != DEFAULT_SAMPLER.getWrapModeV()) {
result[keys::wrapModeV] = sampler.getWrapModeV();
}
if (sampler.getWrapModeW() != DEFAULT_SAMPLER.getWrapModeW()) {
result[keys::wrapModeW] = sampler.getWrapModeW();
}
if (sampler.getFilter() != DEFAULT_SAMPLER.getFilter()) {
result[keys::filter] = sampler.getFilter();
}
if (sampler.getComparisonFunction() != DEFAULT_SAMPLER.getComparisonFunction()) {
result[keys::comparisonFunction] = sampler.getComparisonFunction();
}
if (sampler.getMinMip() != DEFAULT_SAMPLER.getMinMip()) {
result[keys::minMip] = sampler.getMinMip();
}
if (sampler.getMaxMip() != DEFAULT_SAMPLER.getMaxMip()) {
result[keys::maxMip] = sampler.getMaxMip();
}
if (sampler.getMipOffset() != DEFAULT_SAMPLER.getMipOffset()) {
result[keys::mipOffset] = sampler.getMipOffset();
}
return result;
}
json Serializer::writeBuffer(const BufferPointer& bufferPointer) {
if (!bufferPointer) {
return json();
}
return json(bufferPointer->getSize());
}
json Serializer::writeIrradiance(const SHPointer& irradiancePointer) {
if (!irradiancePointer) {
return json();
}
json result = json::object();
const auto& irradiance = *irradiancePointer;
result[keys::L00] = writeVec3(irradiance.L00);
result[keys::L1m1] = writeVec3(irradiance.L1m1);
result[keys::L10] = writeVec3(irradiance.L10);
result[keys::L11] = writeVec3(irradiance.L11);
result[keys::L2m2] = writeVec3(irradiance.L2m2);
result[keys::L2m1] = writeVec3(irradiance.L2m1);
result[keys::L20] = writeVec3(irradiance.L20);
result[keys::L21] = writeVec3(irradiance.L21);
result[keys::L22] = writeVec3(irradiance.L22);
return result;
}
json Serializer::writeTexture(const TexturePointer& texturePointer) {
if (!texturePointer) {
return json();
}
const auto& texture = *texturePointer;
json result = json::object();
if (!texture.source().empty()) {
result[keys::source] = texture.source();
}
const auto usageType = texture.getUsageType();
result[keys::usageType] = usageType;
result[keys::type] = texture.getType();
result[keys::width] = texture._width;
result[keys::height] = texture._height;
result[keys::depth] = texture._depth;
result[keys::mips] = texture._maxMipLevel + 1;
result[keys::samples] = texture._numSamples;
result[keys::layers] = texture._numSlices;
result[keys::texelFormat] = texture.getTexelFormat().getRaw();
if (texture.isIrradianceValid()) {
result["irradiance"] = writeIrradiance(texture._irradiance);
}
if (texture._sampler != DEFAULT_SAMPLER) {
result[keys::sampler] = writeSampler(texture._sampler);
}
if (usageType == TextureUsageType::RENDERBUFFER) {
// TODO figure out if the buffer contents need to be preserved (if it's used as an input before it's ever written to)
// This might be the case for things like the TAA output attachments from the previous frame
} else if (usageType == TextureUsageType::EXTERNAL) {
// TODO serialize the current GL contents (if any) to the JSON
} else {
const auto* storage = texture._storage.get();
const auto* ktxStorage = dynamic_cast<const Texture::KtxStorage*>(storage);
if (ktxStorage) {
result[keys::ktxFile] = ktxStorage->_filename;
} else {
// TODO serialize the backing storage
}
}
return result;
}
json Serializer::writeTextureView(const TextureView& textureView) {
static const auto DEFAULT_TEXTURE_VIEW = TextureView();
json result = json::object();
if (textureView._texture) {
result[keys::texture] = getGlobalIndex(textureView._texture, textureMap);
}
if (textureView._subresource != DEFAULT_TEXTURE_VIEW._subresource) {
result[keys::subresource] = textureView._subresource;
}
if (textureView._element != DEFAULT_TEXTURE_VIEW._element) {
result[keys::element] = textureView._element.getRaw();
}
return result;
}
json Serializer::writeFramebuffer(const FramebufferPointer& framebufferPointer) {
if (!framebufferPointer) {
return json();
}
auto result = json::object();
const auto& framebuffer = *framebufferPointer;
if (!framebuffer._name.empty()) {
result[keys::name] = framebuffer._name;
}
if (framebuffer._bufferMask != 0) {
result[keys::bufferMask] = framebuffer._bufferMask;
}
if (framebuffer._height != 0) {
result[keys::height] = framebuffer._height;
}
if (framebuffer._width != 0) {
result[keys::width] = framebuffer._width;
}
if (framebuffer._numSamples != 0 && framebuffer._numSamples != 1) {
result[keys::sampleCount] = framebuffer._numSamples;
}
if (framebuffer._depthStencilBuffer.isValid()) {
result[keys::depthStencilAttachment] = writeTextureView(framebuffer._depthStencilBuffer);
}
if (!framebuffer._renderBuffers.empty()) {
size_t renderBufferCount = 0;
for (size_t i = 0; i < framebuffer._renderBuffers.size(); ++i) {
if (framebuffer._renderBuffers[i].isValid()) {
renderBufferCount = i + 1;
}
}
if (renderBufferCount > 0) {
auto& node = result[keys::colorAttachments] = json::array();
for (size_t i = 0; i < renderBufferCount; ++i) {
node.push_back(writeTextureView(framebuffer._renderBuffers[i]));
}
}
}
//SwapchainPointer _swapchain;
return result;
}
json Serializer::writeTextureTable(const TextureTablePointer& textureTablePointer) {
auto tableNode = json::array();
const auto& textureTable = *textureTablePointer;
for (const auto& texture : textureTable.getTextures()) {
tableNode.push_back(getGlobalIndex(texture, textureMap));
}
return tableNode;
}
json Serializer::writeFormat(const Stream::FormatPointer& formatPointer) {
if (!formatPointer) {
return json();
}
const auto& format = *formatPointer;
json result = json::object();
auto& attributesNode = result[keys::attributes] = json::array();
static const auto DEFAULT_ATTRIBUTE = Stream::Attribute();
for (const auto& entry : format._attributes) {
const auto& attribute = entry.second;
auto attributeNode = json::object();
attributeNode[keys::slot] = attribute._slot;
attributeNode[keys::channel] = attribute._channel;
if (DEFAULT_ATTRIBUTE._element.getRaw() != attribute._element.getRaw()) {
attributeNode[keys::element] = attribute._element.getRaw();
}
if (DEFAULT_ATTRIBUTE._frequency != attribute._frequency) {
attributeNode[keys::frequency] = attribute._frequency;
}
if (DEFAULT_ATTRIBUTE._offset != attribute._offset) {
attributeNode[keys::offset] = attribute._offset;
}
attributesNode.push_back(attributeNode);
}
return result;
}
#define SET_IF_NOT_DEFAULT(FIELD) \
if (value.FIELD != DEFAULT.FIELD) { \
result[keys::FIELD] = value.FIELD; \
}
#define SET_IF_NOT_DEFAULT_(FIELD) \
if (value._##FIELD != DEFAULT._##FIELD) { \
result[keys::FIELD] = value._##FIELD; \
}
#define SET_IF_NOT_DEFAULT_TRANSFORM(FIELD, TRANSFORM) \
if (value.FIELD != DEFAULT.FIELD) { \
result[keys::FIELD] = TRANSFORM(value.FIELD); \
}
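// For example, SET_IF_NOT_DEFAULT(cullMode) expands to roughly:
//   if (value.cullMode != DEFAULT.cullMode) { result[keys::cullMode] = value.cullMode; }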
static json writeBlendFunction(const State::BlendFunction& value) {
static const State::BlendFunction DEFAULT;
json result = json::object();
SET_IF_NOT_DEFAULT(enabled);
SET_IF_NOT_DEFAULT(sourceColor);
SET_IF_NOT_DEFAULT(sourceAlpha);
SET_IF_NOT_DEFAULT(destColor);
SET_IF_NOT_DEFAULT(destAlpha);
SET_IF_NOT_DEFAULT(opAlpha);
SET_IF_NOT_DEFAULT(opColor);
return result;
}
static json writeStateFlags(const State::Flags& value) {
static const State::Flags DEFAULT;
json result = json::object();
SET_IF_NOT_DEFAULT(frontFaceClockwise);
SET_IF_NOT_DEFAULT(depthClampEnable);
SET_IF_NOT_DEFAULT(scissorEnable);
SET_IF_NOT_DEFAULT(multisampleEnable);
SET_IF_NOT_DEFAULT(antialisedLineEnable);
SET_IF_NOT_DEFAULT(alphaToCoverageEnable);
return result;
}
static json writeDepthTest(const State::DepthTest& value) {
static const State::DepthTest DEFAULT;
json result = json::object();
SET_IF_NOT_DEFAULT(writeMask);
SET_IF_NOT_DEFAULT(enabled);
SET_IF_NOT_DEFAULT(function);
return result;
}
static json writeStereoState(const StereoState& value) {
static const StereoState DEFAULT;
json result = json::object();
SET_IF_NOT_DEFAULT_(enable);
SET_IF_NOT_DEFAULT_(contextDisable);
SET_IF_NOT_DEFAULT_(skybox);
if ((value._eyeProjections[0] != DEFAULT._eyeProjections[0]) || (value._eyeProjections[1] != DEFAULT._eyeProjections[1])) {
json projections = json::array();
projections.push_back(Serializer::writeMat4(value._eyeProjections[0]));
projections.push_back(Serializer::writeMat4(value._eyeProjections[1]));
result[keys::eyeProjections] = projections;
}
if ((value._eyeViews[0] != DEFAULT._eyeViews[0]) || (value._eyeViews[1] != DEFAULT._eyeViews[1])) {
json views = json::array();
views.push_back(Serializer::writeMat4(value._eyeViews[0]));
views.push_back(Serializer::writeMat4(value._eyeViews[1]));
result[keys::eyeViews] = views;
}
return result;
}
static json writeStencilTest(const State::StencilTest& value) {
static const State::StencilTest DEFAULT;
json result = json::object();
SET_IF_NOT_DEFAULT(function);
SET_IF_NOT_DEFAULT(failOp);
SET_IF_NOT_DEFAULT(depthFailOp);
SET_IF_NOT_DEFAULT(passOp);
SET_IF_NOT_DEFAULT(reference);
SET_IF_NOT_DEFAULT(readMask);
return result;
}
static json writeStencilActivation(const State::StencilActivation& value) {
static const State::StencilActivation DEFAULT;
json result = json::object();
SET_IF_NOT_DEFAULT(frontWriteMask);
SET_IF_NOT_DEFAULT(backWriteMask);
SET_IF_NOT_DEFAULT(enabled);
return result;
}
json writeState(const StatePointer& statePointer) {
if (!statePointer) {
return json();
}
const auto& state = *statePointer;
const auto& value = state.getValues();
const auto& DEFAULT = State::DEFAULT;
auto result = json::object();
SET_IF_NOT_DEFAULT(colorWriteMask);
SET_IF_NOT_DEFAULT(cullMode);
SET_IF_NOT_DEFAULT(depthBias);
SET_IF_NOT_DEFAULT(depthBiasSlopeScale);
SET_IF_NOT_DEFAULT(fillMode);
SET_IF_NOT_DEFAULT(sampleMask);
SET_IF_NOT_DEFAULT_TRANSFORM(blendFunction, writeBlendFunction);
SET_IF_NOT_DEFAULT_TRANSFORM(flags, writeStateFlags);
SET_IF_NOT_DEFAULT_TRANSFORM(depthTest, writeDepthTest);
SET_IF_NOT_DEFAULT_TRANSFORM(stencilActivation, writeStencilActivation);
SET_IF_NOT_DEFAULT_TRANSFORM(stencilTestFront, writeStencilTest);
SET_IF_NOT_DEFAULT_TRANSFORM(stencilTestBack, writeStencilTest);
return result;
}
json Serializer::writePipeline(const PipelinePointer& pipelinePointer) {
if (!pipelinePointer) {
return json();
}
const auto& pipeline = *pipelinePointer;
auto result = json::object();
result[keys::state] = writeState(pipeline.getState());
result[keys::program] = getGlobalIndex(pipeline.getProgram(), programMap);
return result;
}
json Serializer::writeProgram(const ShaderPointer& programPointer) {
if (!programPointer) {
return json();
}
const auto& program = *programPointer;
auto result = json::array();
for (const auto& shader : program._shaders) {
result.push_back(getGlobalIndex(shader, shaderMap));
}
return result;
}
json Serializer::writeShader(const ShaderPointer& shaderPointer) {
if (!shaderPointer) {
return json();
}
auto result = json::object();
const auto& shader = *shaderPointer;
result[keys::id] = shader._source.id;
result[keys::name] = shader._source.name;
result[keys::type] = shader._type;
return result;
}
json Serializer::writeSwapchain(const SwapChainPointer& swapchainPointer) {
auto framebufferSwapchainPointer = std::static_pointer_cast<FramebufferSwapChain>(swapchainPointer);
if (!framebufferSwapchainPointer) {
return json();
}
const FramebufferSwapChain& swapchain = *framebufferSwapchainPointer;
auto result = json::object();
result[keys::size] = swapchain.getSize();
auto& framebuffersNode = result[keys::framebuffers] = json::array();
for (uint32_t i = 0; i < swapchain.getSize(); ++i) {
uint32_t index = getGlobalIndex(swapchain.get(i), framebufferMap);
framebuffersNode.push_back(index);
}
return result;
}
json Serializer::writeQuery(const QueryPointer& queryPointer) {
if (!queryPointer) {
return json();
}
const auto& query = *queryPointer;
auto result = json::object();
result[keys::name] = query._name;
return result;
}
json Serializer::writeCapturableTextures(const Frame& frame) {
if (!textureCapturer) {
return json::array();
}
std::unordered_set<TexturePointer> writtenRenderbuffers;
std::unordered_set<TexturePointer> captureTextures;
auto maybeCaptureTexture = [&](const TexturePointer& texture) {
// Not a valid texture
if (!texture) {
return;
}
// Not a renderbuffer
if (texture->getUsageType() != TextureUsageType::RENDERBUFFER) {
return;
}
// Already used in a target framebuffer
if (writtenRenderbuffers.count(texture) > 0) {
return;
}
captureTextures.insert(texture);
};
for (const auto& batchPtr : frame.batches) {
const auto& batch = *batchPtr;
batch.forEachCommand([&](Batch::Command command, const Batch::Param* params) {
switch (command) {
case Batch::COMMAND_setResourceTexture: {
const auto& texture = batch._textures.get(params[0]._uint);
maybeCaptureTexture(texture);
} break;
case Batch::COMMAND_setResourceTextureTable: {
const auto& textureTablePointer = batch._textureTables.get(params[0]._uint);
if (textureTablePointer) {
for (const auto& texture : textureTablePointer->getTextures()) {
maybeCaptureTexture(texture);
}
}
} break;
case Batch::COMMAND_setFramebuffer: {
const auto& framebufferPointer = batch._framebuffers.get(params[0]._uint);
if (framebufferPointer) {
const auto& framebuffer = *framebufferPointer;
for (const auto& colorAttachment : framebuffer._renderBuffers) {
if (colorAttachment._texture) {
writtenRenderbuffers.insert(colorAttachment._texture);
}
}
if (framebuffer._depthStencilBuffer._texture) {
writtenRenderbuffers.insert(framebuffer._depthStencilBuffer._texture);
}
}
}
case Batch::COMMAND_setResourceFramebufferSwapChainTexture:
default:
break;
}
}); // for each command
} // for each batch
for (const auto& entry : textureMap) {
const auto& texturePointer = entry.first;
if (!texturePointer) {
continue;
}
const auto& texture = *texturePointer;
auto usageType = texture.getUsageType();
if (usageType == TextureUsageType::RESOURCE || usageType == TextureUsageType::STRICT_RESOURCE) {
const auto* storage = texture._storage.get();
const auto* ktxStorage = dynamic_cast<const Texture::KtxStorage*>(storage);
if (!ktxStorage) {
captureTextures.insert(texturePointer);
}
}
}
json result = json::array();
for (const auto& texture : captureTextures) {
if (textureCapturer) {
auto index = textureMap[texture];
auto layers = std::max<uint16>(texture->getNumSlices(), 1);
for (uint16 layer = 0; layer < layers; ++layer) {
std::string textureFilename = basename + "." + std::to_string(index) + "." + std::to_string(layer) + ".png";
textureCapturer(textureFilename, texture, layer);
}
result.push_back(index);
}
}
return result;
}
void Serializer::writeFrame(const Frame& frame) {
json frameNode = json::object();
frameNode[keys::batches] = json::array();
for (const auto& batchPointer : frame.batches) {
frameNode[keys::batches].push_back(writeBatch(*batchPointer));
}
frameNode[keys::stereo] = writeStereoState(frame.stereoState);
frameNode[keys::capturedTextures] = writeCapturableTextures(frame);
frameNode[keys::frameIndex] = frame.frameIndex;
frameNode[keys::view] = writeMat4(frame.view);
frameNode[keys::pose] = writeMat4(frame.pose);
frameNode[keys::framebuffer] = getGlobalIndex(frame.framebuffer, framebufferMap);
using namespace std::placeholders;
serializeMap(frameNode, keys::swapchains, swapchainMap, std::bind(&Serializer::writeSwapchain, this, _1));
serializeMap(frameNode, keys::framebuffers, framebufferMap, [this](const auto& t) { return writeFramebuffer(t); });
serializeMap(frameNode, keys::textureTables, textureTableMap, [this](const auto& t) { return writeTextureTable(t); });
serializeMap(frameNode, keys::pipelines, pipelineMap, [this](const auto& t) { return writePipeline(t); });
serializeMap(frameNode, keys::programs, programMap, [this](const auto& t) { return writeProgram(t); });
serializeMap(frameNode, keys::shaders, shaderMap, writeShader);
serializeMap(frameNode, keys::queries, queryMap, writeQuery);
serializeMap(frameNode, keys::formats, formatMap, writeFormat);
// Serialize textures and buffers last, since the maps they use can be populated by some of the above code
// Serialize textures
serializeMap(frameNode, keys::textures, textureMap, writeTexture);
// Serialize buffers
serializeMap(frameNode, keys::buffers, bufferMap, writeBuffer);
// Record the binary filename before dumping, so it actually lands in the serialized JSON.
frameNode[keys::binary] = basename + ".bin";
{
std::string frameJson = frameNode.dump();
std::string filename = basename + ".json";
storage::FileStorage::create(filename.c_str(), frameJson.size(), (const uint8_t*)frameJson.data());
}
writeBinaryBlob();
}
void Serializer::writeBinaryBlob() {
const auto buffers = mapToVector(bufferMap);
auto accumulator = [](size_t total, const BufferPointer& buffer) { return total + (buffer ? buffer->getSize() : 0); };
size_t totalSize = std::accumulate(buffers.begin(), buffers.end(), (size_t)0, accumulator);
const auto blobFilename = basename + ".bin";
QFile file(blobFilename.c_str());
if (!file.open(QFile::ReadWrite | QIODevice::Truncate)) {
throw std::runtime_error("Unable to open file for writing");
}
if (!file.resize(totalSize)) {
throw std::runtime_error("Unable to resize file");
}
auto mapped = file.map(0, totalSize);
size_t offset = 0;
for (const auto& bufferPointer : buffers) {
if (!bufferPointer) {
continue;
}
const auto& buffer = *bufferPointer;
const auto bufferSize = buffer.getSize();
const auto& bufferData = buffer._renderSysmem.readData();
memcpy(mapped + offset, bufferData, bufferSize);
offset += bufferSize;
}
if (!file.unmap(mapped)) {
throw std::runtime_error("Unable to unmap file");
}
}
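// The blob is every non-null buffer concatenated in table order; because writeBuffer records each
// buffer's size in the JSON, the deserializer can recover the per-buffer offsets when mapping the
// .bin file back in.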


@ -173,6 +173,8 @@ protected:
void updateSize(const TexturePointer& texture);
bool assignDepthStencilBuffer(const TexturePointer& texture, const Format& format, uint32 subresource);
friend class Serializer;
friend class Deserializer;
// Non exposed
Framebuffer(const Framebuffer& framebuffer) = delete;
Framebuffer() {}


@ -45,6 +45,9 @@ namespace gpu {
const std::string _name;
uint64_t _queryResult { 0 };
uint64_t _usecBatchElapsedTime { 0 };
friend class Serializer;
friend class Deserializer;
};
typedef std::shared_ptr<Query> QueryPointer;


@ -167,6 +167,8 @@ protected:
const Pointer& vertexShader,
const Pointer& geometryShader,
const Pointer& pixelShader);
friend class Serializer;
friend class Deserializer;
};
typedef Shader::Pointer ShaderPointer;


@ -128,7 +128,7 @@ public:
public:
DepthTest(bool enabled = false, bool writeMask = true, ComparisonFunction func = LESS) :
function(func), writeMask(writeMask), enabled(enabled) {}
writeMask(writeMask), enabled(enabled), function(func) {}
bool isEnabled() const { return enabled != 0; }
ComparisonFunction getFunction() const { return function; }


@ -128,6 +128,8 @@ public:
uint32 _elementTotalSize { 0 };
std::string _key;
friend class Serializer;
friend class Deserializer;
void evaluateCache();
};


@ -189,6 +189,8 @@ public:
}
protected:
Desc _desc;
friend class Deserializer;
};
enum class TextureUsageType : uint8 {
@ -371,6 +373,8 @@ public:
ktx::KTXDescriptorPointer _ktxDescriptor;
friend class Texture;
friend class Serializer;
friend class Deserializer;
};
uint16 minAvailableMipLevel() const { return _storage->minAvailableMipLevel(); };
@ -628,6 +632,9 @@ protected:
static TexturePointer create(TextureUsageType usageType, Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, uint16 numMips, const Sampler& sampler);
Size resize(Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, uint16 numMips);
friend class Serializer;
friend class Deserializer;
};
typedef std::shared_ptr<Texture> TexturePointer;


@ -142,6 +142,7 @@ public:
// Rendering support
virtual void setContext(const gpu::ContextPointer& context) final { _gpuContext = context; }
virtual void submitFrame(const gpu::FramePointer& newFrame) = 0;
virtual void captureFrame(const std::string& outputName) const { }
virtual float getRenderResolutionScale() const {
return _renderResolutionScale;


@ -70,12 +70,12 @@ StoragePointer FileStorage::create(const QString& filename, size_t size, const u
}
FileStorage::FileStorage(const QString& filename) : _file(filename) {
bool opened = _file.open(QFile::ReadWrite);
bool opened = _file.open(QFile::ReadWrite | QFile::Unbuffered);
if (opened) {
_hasWriteAccess = true;
} else {
_hasWriteAccess = false;
opened = _file.open(QFile::ReadOnly);
opened = _file.open(QFile::ReadOnly | QFile::Unbuffered);
}
if (opened) {


@ -6,31 +6,38 @@ if (NPM_EXECUTABLE)
set_target_properties(jsdoc PROPERTIES FOLDER "Tools")
endif()
function(check_test name)
set(RESULT TRUE)
if (BUILD_TOOLS_INCLUDE)
unset(RESULT)
list(FIND BUILD_TOOLS_INCLUDE ${name} BUILD_TOOL_FIND)
if (NOT (${BUILD_TOOL_FIND} EQUAL -1))
set(RESULT TRUE)
endif()
endif()
set(BUILD_TOOL_RESULT ${RESULT} PARENT_SCOPE)
endfunction()
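# Hypothetical usage: configure with -DBUILD_TOOLS_INCLUDE="gpu-frame-player;frame-optimizer" to build
# only the listed tools; when the variable is unset, every tool in ALL_TOOLS is built.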
if (BUILD_TOOLS)
add_subdirectory(udt-test)
set_target_properties(udt-test PROPERTIES FOLDER "Tools")
set(ALL_TOOLS
udt-test
vhacd-util
frame-optimizer
gpu-frame-player
ice-client
ktx-tool
ac-client
skeleton-dump
atp-client
oven
nitpick
)
add_subdirectory(vhacd-util)
set_target_properties(vhacd-util PROPERTIES FOLDER "Tools")
add_subdirectory(ice-client)
set_target_properties(ice-client PROPERTIES FOLDER "Tools")
add_subdirectory(ktx-tool)
set_target_properties(ktx-tool PROPERTIES FOLDER "Tools")
add_subdirectory(ac-client)
set_target_properties(ac-client PROPERTIES FOLDER "Tools")
add_subdirectory(skeleton-dump)
set_target_properties(skeleton-dump PROPERTIES FOLDER "Tools")
add_subdirectory(atp-client)
set_target_properties(atp-client PROPERTIES FOLDER "Tools")
add_subdirectory(oven)
set_target_properties(oven PROPERTIES FOLDER "Tools")
add_subdirectory(nitpick)
set_target_properties(nitpick PROPERTIES FOLDER "Tools")
foreach(TOOL ${ALL_TOOLS})
check_test(${TOOL})
if (${BUILD_TOOL_RESULT})
add_subdirectory(${TOOL})
set_target_properties(${TOOL} PROPERTIES FOLDER "Tools")
endif()
endforeach()
endif()


@ -0,0 +1,6 @@
set(TARGET_NAME frame-optimizer)
setup_memory_debugger()
setup_hifi_project(Gui Widgets)
link_hifi_libraries(shared ktx shaders gpu)
package_libraries_for_deployment()


@ -0,0 +1,39 @@
//
// Created by Bradley Austin Davis on 2018/10/14
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <QtCore/QCoreApplication>
#ifdef Q_OS_WIN
#include <Windows.h>
#endif
#include <iostream>
#include <gpu/FrameIO.h>
#include <gpu/Texture.h>
gpu::IndexOptimizer optimizer = [](gpu::Primitive primitive, uint32_t faceCount, uint32_t indexCount, uint32_t* indices) {
// FIXME add a triangle index optimizer here
};
void messageHandler(QtMsgType type, const QMessageLogContext &, const QString & message) {
auto messageStr = message.toStdString();
#ifdef Q_OS_WIN
OutputDebugStringA(messageStr.c_str());
OutputDebugStringA("\n");
#endif
std::cerr << messageStr << std::endl;
}
int main(int argc, char** argv) {
QCoreApplication app(argc, argv);
qInstallMessageHandler(messageHandler);
gpu::optimizeFrame("D:/Frames/20190112_1647.json", optimizer);
return 0;
}


@ -0,0 +1,19 @@
set(TARGET_NAME gpu-frame-player)
setup_memory_debugger()
setup_hifi_project(Gui Widgets)
# link in the shared libraries
link_hifi_libraries(
shared ktx shaders gpu
# vk gpu-vk
gl ${PLATFORM_GL_BACKEND}
)
target_compile_definitions(${TARGET_NAME} PRIVATE USE_GL)
target_opengl()
#target_vulkan()
package_libraries_for_deployment()


@ -0,0 +1,110 @@
//
// Created by Bradley Austin Davis on 2018/10/21
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "PlayerWindow.h"
#include <QtGui/QResizeEvent>
#include <QtGui/QImageReader>
#include <QtGui/QScreen>
#include <QtWidgets/QFileDialog>
#include <gpu/FrameIO.h>
PlayerWindow::PlayerWindow() {
installEventFilter(this);
setFlags(Qt::MSWindowsOwnDC | Qt::Window | Qt::Dialog | Qt::WindowMinMaxButtonsHint | Qt::WindowTitleHint);
#ifdef USE_GL
setSurfaceType(QSurface::OpenGLSurface);
#else
setSurfaceType(QSurface::VulkanSurface);
#endif
setGeometry(QRect(QPoint(), QSize(800, 600)));
create();
show();
// Ensure the window is visible and the GL context is valid
QCoreApplication::processEvents();
_renderThread.initialize(this);
}
PlayerWindow::~PlayerWindow() {
}
bool PlayerWindow::eventFilter(QObject* obj, QEvent* event) {
if (event->type() == QEvent::Close) {
_renderThread.terminate();
}
return QWindow::eventFilter(obj, event);
}
void PlayerWindow::loadFrame() {
static const QString LAST_FILE_KEY{ "lastFile" };
auto lastScene = _settings.value(LAST_FILE_KEY);
QString openDir;
if (lastScene.isValid()) {
QFileInfo lastSceneInfo(lastScene.toString());
if (lastSceneInfo.absoluteDir().exists()) {
openDir = lastSceneInfo.absolutePath();
}
}
QString fileName = QFileDialog::getOpenFileName(nullptr, tr("Open File"), openDir, tr("GPU Frames (*.json)"));
if (fileName.isNull()) {
return;
}
_settings.setValue(LAST_FILE_KEY, fileName);
loadFrame(fileName);
}
void PlayerWindow::keyPressEvent(QKeyEvent* event) {
switch (event->key()) {
case Qt::Key_F1:
loadFrame();
return;
default:
break;
}
}
void PlayerWindow::resizeEvent(QResizeEvent* ev) {
_renderThread.resize(ev->size());
}
void PlayerWindow::textureLoader(const std::string& filename, const gpu::TexturePointer& texture, uint16_t layer) {
if (layer > 0) {
return;
}
QImage image;
QImageReader(filename.c_str()).read(&image);
texture->assignStoredMip(0, image.byteCount(), image.constBits());
}
void PlayerWindow::loadFrame(const QString& path) {
auto frame = gpu::readFrame(path.toStdString(), _renderThread._externalTexture, &PlayerWindow::textureLoader);
if (!frame) {
return;
}
if (frame->framebuffer) {
const auto& fbo = *frame->framebuffer;
glm::uvec2 size{ fbo.getWidth(), fbo.getHeight() };
auto screenSize = screen()->size();
static const glm::uvec2 maxSize{ screenSize.width() - 100, screenSize.height() - 100 };
while (glm::any(glm::greaterThan(size, maxSize))) {
size /= 2;
}
resize(size.x, size.y);
}
_renderThread.submitFrame(frame);
if (!_renderThread.isThreaded()) {
_renderThread.process();
}
}


@ -0,0 +1,34 @@
//
// Created by Bradley Austin Davis on 2018/10/21
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#pragma once
#include <QtGui/QWindow>
#include <QtCore/QSettings>
#include <gpu/Forward.h>
#include "RenderThread.h"
// Create a simple OpenGL window that renders text in various ways
class PlayerWindow : public QWindow {
public:
PlayerWindow();
virtual ~PlayerWindow();
protected:
bool eventFilter(QObject* obj, QEvent* event) override;
void keyPressEvent(QKeyEvent* event) override;
void resizeEvent(QResizeEvent* ev) override;
void loadFrame();
void loadFrame(const QString& path);
private:
static void textureLoader(const std::string& filename, const gpu::TexturePointer& texture, uint16_t layer);
QSettings _settings;
RenderThread _renderThread;
};


@ -0,0 +1,271 @@
//
// Created by Bradley Austin Davis on 2018/10/21
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "RenderThread.h"
#include <QtGui/QWindow>
#include <gl/QOpenGLContextWrapper.h>
void RenderThread::submitFrame(const gpu::FramePointer& frame) {
std::unique_lock<std::mutex> lock(_frameLock);
_pendingFrames.push(frame);
}
void RenderThread::resize(const QSize& newSize) {
std::unique_lock<std::mutex> lock(_frameLock);
_pendingSize.push(newSize);
}
void RenderThread::initialize(QWindow* window) {
std::unique_lock<std::mutex> lock(_frameLock);
setObjectName("RenderThread");
Parent::initialize();
_window = window;
#ifdef USE_GL
_context.setWindow(window);
_context.create();
_context.makeCurrent();
QOpenGLContextWrapper(_context.qglContext()).makeCurrent(_window);
glGenTextures(1, &_externalTexture);
glBindTexture(GL_TEXTURE_2D, _externalTexture);
static const glm::u8vec4 color{ 0 };
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, &color);
gl::setSwapInterval(0);
// GPU library init
gpu::Context::init<gpu::gl::GLBackend>();
_context.makeCurrent();
_gpuContext = std::make_shared<gpu::Context>();
_backend = _gpuContext->getBackend();
_context.doneCurrent();
_context.moveToThread(_thread);
#else
auto size = window->size();
_extent = vk::Extent2D{ (uint32_t)size.width(), (uint32_t)size.height() };
_context.setValidationEnabled(true);
_context.requireExtensions({
std::string{ VK_KHR_SURFACE_EXTENSION_NAME },
std::string{ VK_KHR_WIN32_SURFACE_EXTENSION_NAME },
});
_context.requireDeviceExtensions({ VK_KHR_SWAPCHAIN_EXTENSION_NAME });
_context.createInstance();
_surface = _context.instance.createWin32SurfaceKHR({ {}, GetModuleHandle(NULL), (HWND)window->winId() });
_context.createDevice(_surface);
_swapchain.setSurface(_surface);
_swapchain.create(_extent, true);
setupRenderPass();
setupFramebuffers();
acquireComplete = _context.device.createSemaphore(vk::SemaphoreCreateInfo{});
renderComplete = _context.device.createSemaphore(vk::SemaphoreCreateInfo{});
// GPU library init
gpu::Context::init<gpu::vulkan::VKBackend>();
_gpuContext = std::make_shared<gpu::Context>();
_backend = _gpuContext->getBackend();
#endif
}
void RenderThread::setup() {
// Wait until the context has been moved to this thread
{ std::unique_lock<std::mutex> lock(_frameLock); }
_gpuContext->beginFrame();
_gpuContext->endFrame();
#ifdef USE_GL
_context.makeCurrent();
glViewport(0, 0, 800, 600);
(void)CHECK_GL_ERROR();
#endif
_elapsed.start();
}
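// Drain any frames still queued so their pending resource updates are consumed before the gpu context is torn down.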
void RenderThread::shutdown() {
_activeFrame.reset();
while (!_pendingFrames.empty()) {
_gpuContext->consumeFrameUpdates(_pendingFrames.front());
_pendingFrames.pop();
}
_gpuContext->shutdown();
_gpuContext.reset();
}
#ifndef USE_GL
extern vk::CommandBuffer currentCommandBuffer;
#endif
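// Replay one frame through the backend and present it: under GL, blit the frame's framebuffer to the window and swap; under Vulkan, record and submit a command buffer and present the swapchain image.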
void RenderThread::renderFrame(gpu::FramePointer& frame) {
++_presentCount;
#ifdef USE_GL
_context.makeCurrent();
#endif
_backend->recycle();
_backend->syncCache();
auto windowSize = _window->size();
#ifndef USE_GL
auto windowExtent = vk::Extent2D{ (uint32_t)windowSize.width(), (uint32_t)windowSize.height() };
if (windowExtent != _extent) {
return;
}
if (_extent != _swapchain.extent) {
_swapchain.create(_extent);
setupFramebuffers();
return;
}
static const vk::Offset2D offset;
static const std::array<vk::ClearValue, 2> clearValues{
        vk::ClearColorValue(std::array<float, 4>{ { 0.2f, 0.2f, 0.2f, 0.2f } }),
vk::ClearDepthStencilValue({ 1.0f, 0 }),
};
auto swapchainIndex = _swapchain.acquireNextImage(acquireComplete).value;
auto framebuffer = _framebuffers[swapchainIndex];
const auto& commandBuffer = currentCommandBuffer = _context.createCommandBuffer();
auto rect = vk::Rect2D{ offset, _extent };
vk::RenderPassBeginInfo beginInfo{ _renderPass, framebuffer, rect, (uint32_t)clearValues.size(), clearValues.data() };
commandBuffer.begin(vk::CommandBufferBeginInfo{ vk::CommandBufferUsageFlagBits::eOneTimeSubmit });
using namespace vks::debug::marker;
beginRegion(commandBuffer, "executeFrame", glm::vec4{ 1, 1, 1, 1 });
#endif
if (frame && !frame->batches.empty()) {
_gpuContext->executeFrame(frame);
}
#ifdef USE_GL
auto& glbackend = (gpu::gl::GLBackend&)(*_backend);
glm::uvec2 fboSize{ frame->framebuffer->getWidth(), frame->framebuffer->getHeight() };
auto fbo = glbackend.getFramebufferID(frame->framebuffer);
glDisable(GL_FRAMEBUFFER_SRGB);
glBlitNamedFramebuffer(fbo, 0, 0, 0, fboSize.x, fboSize.y, 0, 0, windowSize.width(), windowSize.height(),
GL_COLOR_BUFFER_BIT, GL_NEAREST);
(void)CHECK_GL_ERROR();
_context.swapBuffers();
_context.doneCurrent();
#else
endRegion(commandBuffer);
beginRegion(commandBuffer, "renderpass:testClear", glm::vec4{ 0, 1, 1, 1 });
commandBuffer.beginRenderPass(beginInfo, vk::SubpassContents::eInline);
commandBuffer.endRenderPass();
endRegion(commandBuffer);
commandBuffer.end();
static const vk::PipelineStageFlags waitFlags{ vk::PipelineStageFlagBits::eBottomOfPipe };
vk::SubmitInfo submitInfo{ 1, &acquireComplete, &waitFlags, 1, &commandBuffer, 1, &renderComplete };
vk::Fence frameFence = _context.device.createFence(vk::FenceCreateInfo{});
_context.queue.submit(submitInfo, frameFence);
_swapchain.queuePresent(renderComplete);
_context.trashCommandBuffers({ commandBuffer });
_context.emptyDumpster(frameFence);
_context.recycle();
#endif
}
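// Render-loop body: swap out the pending frame and resize queues under the lock, apply frame updates, then render the most recent frame (or sleep briefly if none has arrived yet).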
bool RenderThread::process() {
std::queue<gpu::FramePointer> pendingFrames;
std::queue<QSize> pendingSize;
{
std::unique_lock<std::mutex> lock(_frameLock);
pendingFrames.swap(_pendingFrames);
pendingSize.swap(_pendingSize);
}
while (!pendingFrames.empty()) {
_activeFrame = pendingFrames.front();
_gpuContext->consumeFrameUpdates(_activeFrame);
pendingFrames.pop();
}
while (!pendingSize.empty()) {
#ifndef USE_GL
const auto& size = pendingSize.front();
_extent = { (uint32_t)size.width(), (uint32_t)size.height() };
#endif
pendingSize.pop();
}
if (!_activeFrame) {
QThread::msleep(1);
return true;
}
renderFrame(_activeFrame);
return true;
}
#ifndef USE_GL
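// Vulkan-only helpers: framebuffers are recreated whenever the swapchain changes, using the single-attachment render pass below.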
void RenderThread::setupFramebuffers() {
// Recreate the frame buffers
_context.trashAll<vk::Framebuffer>(_framebuffers, [this](const std::vector<vk::Framebuffer>& framebuffers) {
for (const auto& framebuffer : framebuffers) {
_device.destroy(framebuffer);
}
});
vk::ImageView attachment;
vk::FramebufferCreateInfo framebufferCreateInfo;
framebufferCreateInfo.renderPass = _renderPass;
framebufferCreateInfo.attachmentCount = 1;
framebufferCreateInfo.pAttachments = &attachment;
framebufferCreateInfo.width = _extent.width;
framebufferCreateInfo.height = _extent.height;
framebufferCreateInfo.layers = 1;
// Create frame buffers for every swap chain image
_framebuffers = _swapchain.createFramebuffers(framebufferCreateInfo);
}
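// Build a render pass with one color attachment that is cleared on load and finishes in the present layout.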
void RenderThread::setupRenderPass() {
if (_renderPass) {
_device.destroy(_renderPass);
}
vk::AttachmentDescription attachment;
// Color attachment
attachment.format = _swapchain.colorFormat;
attachment.loadOp = vk::AttachmentLoadOp::eClear;
attachment.storeOp = vk::AttachmentStoreOp::eStore;
attachment.initialLayout = vk::ImageLayout::eUndefined;
attachment.finalLayout = vk::ImageLayout::ePresentSrcKHR;
vk::AttachmentReference colorAttachmentReference;
colorAttachmentReference.attachment = 0;
colorAttachmentReference.layout = vk::ImageLayout::eColorAttachmentOptimal;
vk::SubpassDescription subpass;
subpass.pipelineBindPoint = vk::PipelineBindPoint::eGraphics;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &colorAttachmentReference;
vk::SubpassDependency subpassDependency;
subpassDependency.srcSubpass = 0;
subpassDependency.srcAccessMask = vk::AccessFlagBits::eColorAttachmentWrite;
subpassDependency.srcStageMask = vk::PipelineStageFlagBits::eColorAttachmentOutput;
subpassDependency.dstSubpass = VK_SUBPASS_EXTERNAL;
subpassDependency.dstAccessMask = vk::AccessFlagBits::eColorAttachmentRead;
subpassDependency.dstStageMask = vk::PipelineStageFlagBits::eColorAttachmentOutput;
vk::RenderPassCreateInfo renderPassInfo;
renderPassInfo.attachmentCount = 1;
renderPassInfo.pAttachments = &attachment;
renderPassInfo.subpassCount = 1;
renderPassInfo.pSubpasses = &subpass;
renderPassInfo.dependencyCount = 1;
renderPassInfo.pDependencies = &subpassDependency;
_renderPass = _device.createRenderPass(renderPassInfo);
}
#endif

View file

@ -0,0 +1,68 @@
//
// Created by Bradley Austin Davis on 2018/10/21
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#pragma once
#include <QtCore/QElapsedTimer>
#include <GenericThread.h>
#include <shared/RateCounter.h>
#ifdef USE_GL
#include <gl/Config.h>
#include <gl/Context.h>
#include <gpu/gl/GLBackend.h>
#else
#include <vk/VKWindow.h>
#include <gpu/vk/VKBackend.h>
#endif
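// Render thread that owns the graphics context and replays serialized gpu::Frames submitted from the window.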
class RenderThread : public GenericThread {
using Parent = GenericThread;
public:
QWindow* _window{ nullptr };
#ifdef USE_GL
gl::Context _context;
#else
vks::Context& _context{ vks::Context::get() };
const vk::Device& _device{ _context.device };
vk::SurfaceKHR _surface;
vk::RenderPass _renderPass;
vks::Swapchain _swapchain;
vk::Semaphore acquireComplete, renderComplete;
std::vector<vk::Framebuffer> _framebuffers;
vk::Extent2D _extent;
void setupFramebuffers();
void setupRenderPass();
#endif
std::mutex _mutex;
gpu::ContextPointer _gpuContext; // initialized during window creation
std::shared_ptr<gpu::Backend> _backend;
std::atomic<size_t> _presentCount{ 0 };
QElapsedTimer _elapsed;
size_t _frameIndex{ 0 };
std::mutex _frameLock;
std::queue<gpu::FramePointer> _pendingFrames;
std::queue<QSize> _pendingSize;
gpu::FramePointer _activeFrame;
uint32_t _externalTexture{ 0 };
void resize(const QSize& newSize);
void setup() override;
bool process() override;
void shutdown() override;
void submitFrame(const gpu::FramePointer& frame);
void initialize(QWindow* window);
void renderFrame(gpu::FramePointer& frame);
};

View file

@ -0,0 +1,34 @@
//
// Created by Bradley Austin Davis on 2016/07/01
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <QtWidgets/QApplication>
#include <QtCore/QLoggingCategory>
#include <shared/FileLogger.h>
#include "PlayerWindow.h"
Q_DECLARE_LOGGING_CATEGORY(gpu_player_logging)
Q_LOGGING_CATEGORY(gpu_player_logging, "hifi.gpu.player")
QSharedPointer<FileLogger> logger;
static const QString LAST_FRAME_FILE = "lastFrameFile";
static void setup() {
DependencyManager::set<tracing::Tracer>();
}
int main(int argc, char** argv) {
setupHifiApplication("gpuFramePlayer");
QApplication app(argc, argv);
logger.reset(new FileLogger());
setup();
PlayerWindow window;
    return app.exec();
}

tools/normalizeFrame.py Normal file
View file

@ -0,0 +1,59 @@
import os
import json
import shutil
import sys
def scriptRelative(*paths):
scriptdir = os.path.dirname(os.path.realpath(sys.argv[0]))
result = os.path.join(scriptdir, *paths)
result = os.path.realpath(result)
result = os.path.normcase(result)
return result
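# Rewrites a serialized frame's .json so that every referenced KTX texture lives in a ktx/ folder beside the frame file, copying textures out of interface/resources as needed.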
class FrameProcessor:
def __init__(self, filename):
self.filename = filename
dir, name = os.path.split(self.filename)
self.dir = dir
self.ktxDir = os.path.join(self.dir, 'ktx')
os.makedirs(self.ktxDir, exist_ok=True)
self.resDir = scriptRelative("../interface/resources")
        if name.endswith(".json"):
self.name = name[0:-5]
else:
self.name = name
self.filename = self.filename + '.json'
with open(self.filename, 'r') as f:
self.json = json.load(f)
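    # Copy one texture's KTX file (resolving resource-style ':' paths against interface/resources) into the local ktx/ folder and point the frame at the copy.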
def processKtx(self, texture):
if texture is None: return
        if 'ktxFile' not in texture: return
sourceKtx = texture['ktxFile']
if sourceKtx.startswith(':'):
sourceKtx = os.path.join(self.resDir, sourceKtx[3:])
sourceKtxDir, sourceKtxName = os.path.split(sourceKtx)
destKtx = os.path.join(self.ktxDir, sourceKtxName)
if not os.path.isfile(destKtx):
shutil.copy(sourceKtx, destKtx)
newValue = 'ktx/' + sourceKtxName
texture['ktxFile'] = newValue
def process(self):
for texture in self.json['textures']:
self.processKtx(texture)
with open(self.filename, 'w') as f:
json.dump(self.json, f, indent=2)
if __name__ == '__main__':
    # Frame file may be passed on the command line; falls back to the author's local test frame.
    fp = FrameProcessor(sys.argv[1] if len(sys.argv) > 1 else "D:/Frames/20190110_1635.json")
    fp.process()
#C:\Users\bdavi\git\hifi\interface\resources\meshes