Merge branch 'master' of github.com:highfidelity/hifi into toulouse

Commit: d84bebcc28
40 changed files with 697 additions and 293 deletions
cmake/macros/TargetWebRTC.cmake (new file, +24)

@@ -0,0 +1,24 @@
+#
+#  Copyright 2019 High Fidelity, Inc.
+#
+#  Distributed under the Apache License, Version 2.0.
+#  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+#
+macro(TARGET_WEBRTC)
+    if (ANDROID)
+        # I don't yet have working libwebrtc for android
+        # include(SelectLibraryConfigurations)
+        # set(INSTALL_DIR ${HIFI_ANDROID_PRECOMPILED}/webrtc/webrtc)
+        # set(WEBRTC_INCLUDE_DIRS "${INSTALL_DIR}/include/webrtc")
+        # set(WEBRTC_LIBRARY_DEBUG ${INSTALL_DIR}/debug/lib/libwebrtc.a)
+        # set(WEBRTC_LIBRARY_RELEASE ${INSTALL_DIR}/lib/libwebrtc.a)
+        # select_library_configurations(WEBRTC)
+    else()
+        set(WEBRTC_INCLUDE_DIRS "${VCPKG_INSTALL_ROOT}/include/webrtc")
+        find_library(WEBRTC_LIBRARY NAMES webrtc PATHS ${VCPKG_INSTALL_ROOT}/lib/ NO_DEFAULT_PATH)
+        target_include_directories(${TARGET_NAME} SYSTEM PUBLIC ${WEBRTC_INCLUDE_DIRS})
+        target_link_libraries(${TARGET_NAME} ${WEBRTC_LIBRARY})
+    endif()
+
+
+endmacro()

cmake/ports/hifi-deps/CONTROL

@@ -1,4 +1,4 @@
 Source: hifi-deps
 Version: 0.1
 Description: Collected dependencies for High Fidelity applications
-Build-Depends: bullet3, draco, etc2comp, glm, nvtt, openexr (!android), openssl (windows), tbb (!android&!osx), zlib
+Build-Depends: bullet3, draco, etc2comp, glm, nvtt, openexr (!android), openssl (windows), tbb (!android&!osx), zlib, webrtc (!android)

cmake/ports/webrtc/CONTROL (new file, +3)

@@ -0,0 +1,3 @@
+Source: webrtc
+Version: 20190626
+Description: WebRTC
cmake/ports/webrtc/portfile.cmake (new file, +36)

@@ -0,0 +1,36 @@
+include(vcpkg_common_functions)
+set(WEBRTC_VERSION 20190626)
+set(MASTER_COPY_SOURCE_PATH ${CURRENT_BUILDTREES_DIR}/src)
+
+if (ANDROID)
+    # this is handled by hifi_android.py
+elseif (WIN32)
+    vcpkg_download_distfile(
+        WEBRTC_SOURCE_ARCHIVE
+        URLS https://hifi-public.s3.amazonaws.com/seth/webrtc-20190626-windows.zip
+        SHA512 c0848eddb1579b3bb0496b8785e24f30470f3c477145035fd729264a326a467b9467ae9f426aa5d72d168ad9e9bf2c279150744832736bdf39064d24b04de1a3
+        FILENAME webrtc-20190626-windows.zip
+    )
+elseif (APPLE)
+    vcpkg_download_distfile(
+        WEBRTC_SOURCE_ARCHIVE
+        URLS https://hifi-public.s3.amazonaws.com/seth/webrtc-20190626-osx.tar.gz
+        SHA512 fc70cec1b5ee87395137b7090f424e2fc2300fc17d744d5ffa1cf7aa0e0f1a069a9d72ba1ad2fb4a640ebeb6c218bda24351ba0083e1ff96c4a4b5032648a9d2
+        FILENAME webrtc-20190626-osx.tar.gz
+    )
+else ()
+    # else Linux desktop
+    vcpkg_download_distfile(
+        WEBRTC_SOURCE_ARCHIVE
+        URLS https://hifi-public.s3.amazonaws.com/seth/webrtc-20190626-linux.tar.gz
+        SHA512 07d7776551aa78cb09a3ef088a8dee7762735c168c243053b262083d90a1d258cec66dc386f6903da5c4461921a3c2db157a1ee106a2b47e7756cb424b66cc43
+        FILENAME webrtc-20190626-linux.tar.gz
+    )
+endif ()
+
+vcpkg_extract_source_archive(${WEBRTC_SOURCE_ARCHIVE})
+
+file(COPY ${MASTER_COPY_SOURCE_PATH}/webrtc/include DESTINATION ${CURRENT_PACKAGES_DIR})
+file(COPY ${MASTER_COPY_SOURCE_PATH}/webrtc/lib DESTINATION ${CURRENT_PACKAGES_DIR})
+file(COPY ${MASTER_COPY_SOURCE_PATH}/webrtc/share DESTINATION ${CURRENT_PACKAGES_DIR})
+file(COPY ${MASTER_COPY_SOURCE_PATH}/webrtc/debug DESTINATION ${CURRENT_PACKAGES_DIR})
@@ -130,12 +130,12 @@ $(document).ready(function(){
         html += "<td class='data'><strong>File Name</strong></td>";
         html += "<td class='data'><strong>Created</strong></td>";
         html += "<td class='data'><strong>Installed</strong></td>";
-        //html += "<td class='data'><strong>Installed By</strong></td></tr>";
+        html += "<td class='data'><strong>Installed By</strong></td></tr>";
         html += "<tr><td class='data' id='" + INSTALLED_CONTENT_NAME_ID + "'/>";
         html += "<td class='data' id='" + INSTALLED_CONTENT_FILENAME_ID + "'/>";
         html += "<td class='data' id='" + INSTALLED_CONTENT_CREATED_ID + "'/>";
         html += "<td class='data' id='" + INSTALLED_CONTENT_INSTALLED_ID + "'/>";
-        //html += "<td class='data' id='" + INSTALLED_CONTENT_INSTALLED_BY_ID + "'/></tr>";
+        html += "<td class='data' id='" + INSTALLED_CONTENT_INSTALLED_BY_ID + "'/></tr>";
         html += "</tbody></table>";
         $('#' + Settings.INSTALLED_CONTENT + ' .panel-body').html(html);
     }

@@ -379,7 +379,7 @@ $(document).ready(function(){
         $('#' + INSTALLED_CONTENT_FILENAME_ID).text(data.installed_content.filename);
         $('#' + INSTALLED_CONTENT_CREATED_ID).text(data.installed_content.creation_time ? moment(data.installed_content.creation_time).format('lll') : "");
         $('#' + INSTALLED_CONTENT_INSTALLED_ID).text(data.installed_content.install_time ? moment(data.installed_content.install_time).format('lll') : "");
-        //$('#' + INSTALLED_CONTENT_INSTALLED_BY_ID).text(data.installed_content.installed_by);
+        $('#' + INSTALLED_CONTENT_INSTALLED_BY_ID).text(data.installed_content.installed_by);

         // update the progress bars for current restore status
         if (data.status.isRecovering) {
@@ -278,7 +278,7 @@ void AssetsBackupHandler::createBackup(const QString& backupName, QuaZip& zip) {
     _backups.emplace_back(backupName, mappings, false);
 }

-std::pair<bool, QString> AssetsBackupHandler::recoverBackup(const QString& backupName, QuaZip& zip, const QString& sourceFilename) {
+std::pair<bool, QString> AssetsBackupHandler::recoverBackup(const QString& backupName, QuaZip& zip, const QString& username, const QString& sourceFilename) {
     Q_ASSERT(QThread::currentThread() == thread());

     if (operationInProgress()) {

@@ -38,7 +38,7 @@ public:
     void loadBackup(const QString& backupName, QuaZip& zip) override;
     void loadingComplete() override;
     void createBackup(const QString& backupName, QuaZip& zip) override;
-    std::pair<bool, QString> recoverBackup(const QString& backupName, QuaZip& zip, const QString& sourceFilename) override;
+    std::pair<bool, QString> recoverBackup(const QString& backupName, QuaZip& zip, const QString& username, const QString& sourceFilename) override;
     void deleteBackup(const QString& backupName) override;
     void consolidateBackup(const QString& backupName, QuaZip& zip) override;
     bool isCorruptedBackup(const QString& backupName) override;

@@ -30,7 +30,7 @@ public:
     virtual void loadBackup(const QString& backupName, QuaZip& zip) = 0;
     virtual void loadingComplete() = 0;
     virtual void createBackup(const QString& backupName, QuaZip& zip) = 0;
-    virtual std::pair<bool, QString> recoverBackup(const QString& backupName, QuaZip& zip, const QString& sourceFilename) = 0;
+    virtual std::pair<bool, QString> recoverBackup(const QString& backupName, QuaZip& zip, const QString& username, const QString& sourceFilename) = 0;
     virtual void deleteBackup(const QString& backupName) = 0;
     virtual void consolidateBackup(const QString& backupName, QuaZip& zip) = 0;
     virtual bool isCorruptedBackup(const QString& backupName) = 0;

@@ -84,7 +84,7 @@ void ContentSettingsBackupHandler::createBackup(const QString& backupName, QuaZi
     }
 }

-std::pair<bool, QString> ContentSettingsBackupHandler::recoverBackup(const QString& backupName, QuaZip& zip, const QString& sourceFilename) {
+std::pair<bool, QString> ContentSettingsBackupHandler::recoverBackup(const QString& backupName, QuaZip& zip, const QString& username, const QString& sourceFilename) {
     if (!zip.setCurrentFile(CONTENT_SETTINGS_BACKUP_FILENAME)) {
         QString errorStr("Failed to find " + CONTENT_SETTINGS_BACKUP_FILENAME + " while recovering backup");
         qWarning() << errorStr;

@@ -117,7 +117,7 @@ std::pair<bool, QString> ContentSettingsBackupHandler::recoverBackup(const QStri
         { INSTALLED_CONTENT_NAME, archiveJson[INSTALLED_CONTENT_NAME].toString()},
         { INSTALLED_CONTENT_CREATION_TIME, archiveJson[INSTALLED_CONTENT_CREATION_TIME].toVariant().toLongLong() },
         { INSTALLED_CONTENT_INSTALL_TIME, QDateTime::currentDateTime().currentMSecsSinceEpoch() },
-        { INSTALLED_CONTENT_INSTALLED_BY, "" }
+        { INSTALLED_CONTENT_INSTALLED_BY, username }
     };

     jsonObject.insert(INSTALLED_CONTENT, installed_content);

@@ -28,7 +28,7 @@ public:

     void createBackup(const QString& backupName, QuaZip& zip) override;

-    std::pair<bool, QString> recoverBackup(const QString& backupName, QuaZip& zip, const QString& sourceFilename) override;
+    std::pair<bool, QString> recoverBackup(const QString& backupName, QuaZip& zip, const QString& username, const QString& sourceFilename) override;

     void deleteBackup(const QString& backupName) override {}


@@ -279,7 +279,7 @@ void DomainContentBackupManager::deleteBackup(MiniPromise::Promise promise, cons
     });
 }

-bool DomainContentBackupManager::recoverFromBackupZip(const QString& backupName, QuaZip& zip, const QString& sourceFilename, bool rollingBack) {
+bool DomainContentBackupManager::recoverFromBackupZip(const QString& backupName, QuaZip& zip, const QString& username, const QString& sourceFilename, bool rollingBack) {
     if (!zip.open(QuaZip::Mode::mdUnzip)) {
         qWarning() << "Failed to unzip file: " << backupName;
         return false;

@@ -290,7 +290,7 @@ bool DomainContentBackupManager::recoverFromBackupZip(const QString& backupName,
     for (auto& handler : _backupHandlers) {
         bool success;
         QString errorStr;
-        std::tie(success, errorStr) = handler->recoverBackup(backupName, zip, sourceFilename);
+        std::tie(success, errorStr) = handler->recoverBackup(backupName, zip, username, sourceFilename);
         if (!success) {
             if (!rollingBack) {
                 _recoveryError = errorStr;
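The recoverBackup() overrides above all return a std::pair<bool, QString> (a success flag plus an error string), which the manager unpacks with std::tie as the last hunk shows. A minimal, self-contained sketch of that convention (an illustration, not part of the commit):

    #include <QString>
    #include <tuple>
    #include <utility>

    // Hypothetical stand-in for a backup-handler call.
    std::pair<bool, QString> tryRecover() {
        return { false, QStringLiteral("example failure") };
    }

    int main() {
        bool success;
        QString errorStr;
        // std::tie unpacks the pair without C++17 structured bindings,
        // matching the pre-C++17 style used in this codebase.
        std::tie(success, errorStr) = tryRecover();
        return success ? 0 : 1;
    }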
@@ -304,7 +304,7 @@ bool DomainContentBackupManager::recoverFromBackupZip(const QString& backupName,
     }
 }

-void DomainContentBackupManager::recoverFromBackup(MiniPromise::Promise promise, const QString& backupName) {
+void DomainContentBackupManager::recoverFromBackup(MiniPromise::Promise promise, const QString& backupName, const QString& username) {
     if (_isRecovering) {
         promise->resolve({
             { "success", false }

@@ -314,7 +314,7 @@ void DomainContentBackupManager::recoverFromBackup(MiniPromise::Promise promise,

     if (QThread::currentThread() != thread()) {
         QMetaObject::invokeMethod(this, "recoverFromBackup", Q_ARG(MiniPromise::Promise, promise),
-                                  Q_ARG(const QString&, backupName));
+                                  Q_ARG(const QString&, backupName), Q_ARG(const QString&, username));
         return;
     }

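The hunk above illustrates a rule this commit has to follow throughout: a queued QMetaObject::invokeMethod marshals each argument with Q_ARG, so when a slot gains a parameter, every invokeMethod call site must add a matching Q_ARG; a mismatch fails at runtime rather than at compile time. A sketch of the pattern (the `manager` receiver is hypothetical):

    // Re-invoke on the object's own thread, forwarding all three arguments.
    QMetaObject::invokeMethod(manager, "recoverFromBackup",
                              Q_ARG(MiniPromise::Promise, promise),
                              Q_ARG(const QString&, backupName),
                              Q_ARG(const QString&, username));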
@@ -327,7 +327,7 @@ void DomainContentBackupManager::recoverFromBackup(MiniPromise::Promise promise,
     if (backupFile.open(QIODevice::ReadOnly)) {
         QuaZip zip { &backupFile };

-        success = recoverFromBackupZip(backupName, zip, backupName);
+        success = recoverFromBackupZip(backupName, zip, username, backupName);

         backupFile.close();
     } else {

@@ -340,11 +340,11 @@ void DomainContentBackupManager::recoverFromBackup(MiniPromise::Promise promise,
     });
 }

-void DomainContentBackupManager::recoverFromUploadedBackup(MiniPromise::Promise promise, QByteArray uploadedBackup) {
+void DomainContentBackupManager::recoverFromUploadedBackup(MiniPromise::Promise promise, QByteArray uploadedBackup, QString username) {

     if (QThread::currentThread() != thread()) {
         QMetaObject::invokeMethod(this, "recoverFromUploadedBackup", Q_ARG(MiniPromise::Promise, promise),
-                                  Q_ARG(QByteArray, uploadedBackup));
+                                  Q_ARG(QByteArray, uploadedBackup), Q_ARG(QString, username));
         return;
     }


@@ -355,17 +355,17 @@ void DomainContentBackupManager::recoverFromUploadedBackup(MiniPromise::Promise
     QuaZip uploadedZip { &uploadedBackupBuffer };

     QString backupName = MANUAL_BACKUP_PREFIX + "uploaded.zip";
-    bool success = recoverFromBackupZip(backupName, uploadedZip, QString());
+    bool success = recoverFromBackupZip(backupName, uploadedZip, username, QString());

     promise->resolve({
         { "success", success }
     });
 }

-void DomainContentBackupManager::recoverFromUploadedFile(MiniPromise::Promise promise, QString uploadedFilename, QString sourceFilename) {
+void DomainContentBackupManager::recoverFromUploadedFile(MiniPromise::Promise promise, QString uploadedFilename, const QString username, QString sourceFilename) {
     if (QThread::currentThread() != thread()) {
         QMetaObject::invokeMethod(this, "recoverFromUploadedFile", Q_ARG(MiniPromise::Promise, promise),
-                                  Q_ARG(QString, uploadedFilename), Q_ARG(QString, sourceFilename));
+                                  Q_ARG(QString, uploadedFilename), Q_ARG(QString, username), Q_ARG(QString, sourceFilename));
         return;
     }


@@ -382,7 +382,7 @@ void DomainContentBackupManager::recoverFromUploadedFile(MiniPromise::Promise pr

     QString backupName = MANUAL_BACKUP_PREFIX + "uploaded.zip";

-    bool success = recoverFromBackupZip(backupName, uploadedZip, sourceFilename);
+    bool success = recoverFromBackupZip(backupName, uploadedZip, username, sourceFilename);

     if (!success) {


@@ -394,7 +394,7 @@ void DomainContentBackupManager::recoverFromUploadedFile(MiniPromise::Promise pr
         QuaZip uploadedZip { &uploadedFile };

         QString backupName = MANUAL_BACKUP_PREFIX + "uploaded.zip";
-        recoverFromBackupZip(backupName, uploadedZip, sourceFilename, true);
+        recoverFromBackupZip(backupName, uploadedZip, username, sourceFilename, true);

     }
 }

@@ -95,9 +95,9 @@ public:
 public slots:
     void getAllBackupsAndStatus(MiniPromise::Promise promise);
     void createManualBackup(MiniPromise::Promise promise, const QString& name);
-    void recoverFromBackup(MiniPromise::Promise promise, const QString& backupName);
+    void recoverFromBackup(MiniPromise::Promise promise, const QString& backupName, const QString& username);
-    void recoverFromUploadedBackup(MiniPromise::Promise promise, QByteArray uploadedBackup);
+    void recoverFromUploadedBackup(MiniPromise::Promise promise, QByteArray uploadedBackup, QString username);
-    void recoverFromUploadedFile(MiniPromise::Promise promise, QString uploadedFilename, QString sourceFilename);
+    void recoverFromUploadedFile(MiniPromise::Promise promise, QString uploadedFilename, QString username, QString sourceFilename);
     void deleteBackup(MiniPromise::Promise promise, const QString& backupName);

 signals:

@@ -119,7 +119,7 @@ protected:

     std::pair<bool, QString> createBackup(const QString& prefix, const QString& name);

-    bool recoverFromBackupZip(const QString& backupName, QuaZip& backupZip, const QString& sourceFilename, bool rollingBack = false);
+    bool recoverFromBackupZip(const QString& backupName, QuaZip& backupZip, const QString& username, const QString& sourceFilename, bool rollingBack = false);

 private slots:
     void removeOldConsolidatedBackups();
@@ -1957,6 +1957,7 @@ bool DomainServer::handleHTTPRequest(HTTPConnection* connection, const QUrl& url
     QPointer<HTTPConnection> connectionPtr { connection };

     auto nodeList = DependencyManager::get<LimitedNodeList>();
+    QString username;

     auto getSetting = [this](QString keyPath, QVariant& value) -> bool {


@@ -2024,7 +2025,9 @@ bool DomainServer::handleHTTPRequest(HTTPConnection* connection, const QUrl& url
     }

     // all requests below require a cookie to prove authentication so check that first
-    if (!isAuthenticatedRequest(connection, url)) {
+    bool isAuthenticated { false };
+    std::tie(isAuthenticated, username) = isAuthenticatedRequest(connection);
+    if (!isAuthenticated) {
         // this is not an authenticated request
         // return true from the handler since it was handled with a 401 or re-direct to auth
         return true;

@@ -2361,7 +2364,7 @@ bool DomainServer::handleHTTPRequest(HTTPConnection* connection, const QUrl& url
             connectionPtr->respond(success ? HTTPConnection::StatusCode200 : HTTPConnection::StatusCode400, docJSON.toJson(),
                                    JSON_MIME_TYPE.toUtf8());
         });
-        _contentManager->recoverFromBackup(deferred, id);
+        _contentManager->recoverFromBackup(deferred, id, username);
         return true;
     }
 } else if (connection->requestOperation() == QNetworkAccessManager::PutOperation) {

@@ -2557,6 +2560,9 @@ bool DomainServer::processPendingContent(HTTPConnection* connection, QString ite
     int sessionId = sessionIdBytes.toInt();

     bool newUpload = itemName == "restore-file" || itemName == "restore-file-chunk-initial" || itemName == "restore-file-chunk-only";
+    bool isAuthenticated;
+    QString username;
+    std::tie(isAuthenticated, username) = isAuthenticatedRequest(connection);

     if (filename.endsWith(".zip", Qt::CaseInsensitive)) {
         static const QString TEMPORARY_CONTENT_FILEPATH { QDir::tempPath() + "/hifiUploadContent_XXXXXX.zip" };

@@ -2591,7 +2597,7 @@ bool DomainServer::processPendingContent(HTTPConnection* connection, QString ite
         _pendingContentFiles.erase(sessionId);
     });

-    _contentManager->recoverFromUploadedFile(deferred, _pendingFileContent.fileName(), filename);
+    _contentManager->recoverFromUploadedFile(deferred, _pendingFileContent.fileName(), username, filename);
 }
 } else if (filename.endsWith(".json", Qt::CaseInsensitive)
         || filename.endsWith(".json.gz", Qt::CaseInsensitive)) {

@@ -2604,7 +2610,7 @@ bool DomainServer::processPendingContent(HTTPConnection* connection, QString ite

     if (itemName == "restore-file" || itemName == "restore-file-chunk-final" || itemName == "restore-file-chunk-only") {
         // invoke our method to hand the new octree file off to the octree server
-        if (!handleOctreeFileReplacement(_pendingUploadedContent, filename, QString())) {
+        if (!handleOctreeFileReplacement(_pendingUploadedContent, filename, QString(), username)) {
             connection->respond(HTTPConnection::StatusCode400);
             return false;
         }

@@ -2680,7 +2686,7 @@ void DomainServer::profileRequestFinished() {
     }
 }

-bool DomainServer::isAuthenticatedRequest(HTTPConnection* connection, const QUrl& url) {
+std::pair<bool, QString> DomainServer::isAuthenticatedRequest(HTTPConnection* connection) {

     static const QByteArray HTTP_COOKIE_HEADER_KEY = "Cookie";
     static const QString ADMIN_USERS_CONFIG_KEY = "admin-users";

@@ -2717,7 +2723,7 @@ bool DomainServer::isAuthenticatedRequest(HTTPConnection* connection, const QUrl

     if (_settingsManager.valueForKeyPath(ADMIN_USERS_CONFIG_KEY).toStringList().contains(profileUsername)) {
         // this is an authenticated user
-        return true;
+        return { true, profileUsername };
     }

     // loop the roles of this user and see if they are in the admin-roles array

@@ -2727,7 +2733,7 @@ bool DomainServer::isAuthenticatedRequest(HTTPConnection* connection, const QUrl
     foreach(const QString& userRole, sessionData.getRoles()) {
         if (adminRolesArray.contains(userRole)) {
             // this user has a role that allows them to administer the domain-server
-            return true;
+            return { true, profileUsername };
         }
     }
 }

@@ -2735,7 +2741,7 @@ bool DomainServer::isAuthenticatedRequest(HTTPConnection* connection, const QUrl
     connection->respond(HTTPConnection::StatusCode401, UNAUTHENTICATED_BODY);

     // the user does not have allowed username or role, return 401
-    return false;
+    return { false, QString() };
 } else {
     static const QByteArray REQUESTED_WITH_HEADER = "X-Requested-With";
     static const QString XML_REQUESTED_WITH = "XMLHttpRequest";

@@ -2764,7 +2770,7 @@ bool DomainServer::isAuthenticatedRequest(HTTPConnection* connection, const QUrl
     }

     // we don't know about this user yet, so they are not yet authenticated
-    return false;
+    return { false, QString() };
 }
 } else if (_settingsManager.valueForKeyPath(BASIC_AUTH_USERNAME_KEY_PATH).isValid()) {
     // config file contains username and password combinations for basic auth

@@ -2793,7 +2799,7 @@ bool DomainServer::isAuthenticatedRequest(HTTPConnection* connection, const QUrl
         "" : QCryptographicHash::hash(headerPassword.toUtf8(), QCryptographicHash::Sha256).toHex();

     if (settingsUsername == headerUsername && hexHeaderPassword == settingsPassword) {
-        return true;
+        return { true, headerUsername };
     }
 }
 }

@@ -2815,11 +2821,11 @@ bool DomainServer::isAuthenticatedRequest(HTTPConnection* connection, const QUrl
                            HTTPConnection::DefaultContentType, basicAuthHeader);

     // not authenticated, bubble up false
-    return false;
+    return { false, QString() };

 } else {
     // we don't have an OAuth URL + admin roles/usernames, so all users are authenticated
-    return true;
+    return { true, QString() };
 }
 }

@@ -3493,7 +3499,7 @@ void DomainServer::maybeHandleReplacementEntityFile() {
     }
 }

-bool DomainServer::handleOctreeFileReplacement(QByteArray octreeFile, QString sourceFilename, QString name) {
+bool DomainServer::handleOctreeFileReplacement(QByteArray octreeFile, QString sourceFilename, QString name, QString username) {
     OctreeUtils::RawEntityData data;
     if (data.readOctreeDataInfoFromData(octreeFile)) {
         data.resetIdAndVersion();

@@ -3514,7 +3520,7 @@ bool DomainServer::handleOctreeFileReplacement(QByteArray octreeFile, QString so
         { INSTALLED_CONTENT_NAME, name },
         { INSTALLED_CONTENT_CREATION_TIME, 0 },
         { INSTALLED_CONTENT_INSTALL_TIME, QDateTime::currentDateTime().currentMSecsSinceEpoch() },
-        { INSTALLED_CONTENT_INSTALLED_BY, "" }
+        { INSTALLED_CONTENT_INSTALLED_BY, username }
     };

     QJsonObject jsonObject { { INSTALLED_CONTENT, installed_content } };

@@ -3539,6 +3545,11 @@ void DomainServer::handleDomainContentReplacementFromURLRequest(QSharedPointer<R
     qInfo() << "Received request to replace content from a url";
     auto node = DependencyManager::get<LimitedNodeList>()->findNodeWithAddr(message->getSenderSockAddr());
     if (node && node->getCanReplaceContent()) {
+        DomainServerNodeData* nodeData = static_cast<DomainServerNodeData*>(node->getLinkedData());
+        QString username;
+        if (nodeData) {
+            username = nodeData->getUsername();
+        }
         // Convert message data into our URL
         QString url(message->getMessage());
         QUrl modelsURL = QUrl(url, QUrl::StrictMode);

@@ -3548,17 +3559,17 @@ void DomainServer::handleDomainContentReplacementFromURLRequest(QSharedPointer<R

         qDebug() << "Downloading JSON from: " << modelsURL.toString(QUrl::FullyEncoded);

-        connect(reply, &QNetworkReply::finished, [this, reply, modelsURL]() {
+        connect(reply, &QNetworkReply::finished, [this, reply, modelsURL, username]() {
             QNetworkReply::NetworkError networkError = reply->error();
             if (networkError == QNetworkReply::NoError) {
                 if (modelsURL.fileName().endsWith(".json.gz")) {
                     QUrlQuery urlQuery(modelsURL.query(QUrl::FullyEncoded));

                     QString itemName = urlQuery.queryItemValue(CONTENT_SET_NAME_QUERY_PARAM);
-                    handleOctreeFileReplacement(reply->readAll(), modelsURL.fileName(), itemName);
+                    handleOctreeFileReplacement(reply->readAll(), modelsURL.fileName(), itemName, username);
                 } else if (modelsURL.fileName().endsWith(".zip")) {
                     auto deferred = makePromise("recoverFromUploadedBackup");
-                    _contentManager->recoverFromUploadedBackup(deferred, reply->readAll());
+                    _contentManager->recoverFromUploadedBackup(deferred, reply->readAll(), username);
                 }
             } else {
                 qDebug() << "Error downloading JSON from specified file: " << modelsURL;

@@ -3569,7 +3580,12 @@ void DomainServer::handleDomainContentReplacementFromURLRequest(QSharedPointer<R

 void DomainServer::handleOctreeFileReplacementRequest(QSharedPointer<ReceivedMessage> message) {
     auto node = DependencyManager::get<NodeList>()->nodeWithLocalID(message->getSourceID());
-    if (node->getCanReplaceContent()) {
-        handleOctreeFileReplacement(message->readAll(), QString(), QString());
+    if (node && node->getCanReplaceContent()) {
+        QString username;
+        DomainServerNodeData* nodeData = static_cast<DomainServerNodeData*>(node->getLinkedData());
+        if (nodeData) {
+            username = nodeData->getUsername();
+        }
+        handleOctreeFileReplacement(message->readAll(), QString(), QString(), username);
     }
 }
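One behavioral fix rides along in the last hunk: nodeWithLocalID() can return an empty pointer for an unknown sender, so the added `node &&` test guards the dereference that follows. The same defensive pattern in isolation (a sketch with hypothetical names, not engine code):

    #include <memory>

    struct Node {
        bool getCanReplaceContent() const { return true; }
    };

    void handleRequest(const std::shared_ptr<Node>& node) {
        // Check the pointer itself before calling through it.
        if (node && node->getCanReplaceContent()) {
            // safe to use *node here
        }
    }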
@@ -99,7 +99,7 @@ private slots:

     void handleDomainContentReplacementFromURLRequest(QSharedPointer<ReceivedMessage> message);
     void handleOctreeFileReplacementRequest(QSharedPointer<ReceivedMessage> message);
-    bool handleOctreeFileReplacement(QByteArray octreeFile, QString sourceFilename, QString name);
+    bool handleOctreeFileReplacement(QByteArray octreeFile, QString sourceFilename, QString name, QString username);

     void processOctreeDataRequestMessage(QSharedPointer<ReceivedMessage> message);
     void processOctreeDataPersistMessage(QSharedPointer<ReceivedMessage> message);

@@ -194,7 +194,7 @@ private:
     QUrl oauthRedirectURL();
     QUrl oauthAuthorizationURL(const QUuid& stateUUID = QUuid::createUuid());

-    bool isAuthenticatedRequest(HTTPConnection* connection, const QUrl& url);
+    std::pair<bool, QString> isAuthenticatedRequest(HTTPConnection* connection);

     QNetworkReply* profileRequestGivenTokenReply(QNetworkReply* tokenReply);
     Headers setupCookieHeadersFromProfileReply(QNetworkReply* profileReply);

@@ -57,7 +57,7 @@ void EntitiesBackupHandler::createBackup(const QString& backupName, QuaZip& zip)
     }
 }

-std::pair<bool, QString> EntitiesBackupHandler::recoverBackup(const QString& backupName, QuaZip& zip, const QString& sourceFilename) {
+std::pair<bool, QString> EntitiesBackupHandler::recoverBackup(const QString& backupName, QuaZip& zip, const QString& username, const QString& sourceFilename) {
     if (!zip.setCurrentFile(ENTITIES_BACKUP_FILENAME)) {
         QString errorStr("Failed to find " + ENTITIES_BACKUP_FILENAME + " while recovering backup");
         qWarning() << errorStr;

@@ -29,7 +29,7 @@ public:
     void createBackup(const QString& backupName, QuaZip& zip) override;

     // Recover from a full backup
-    std::pair<bool, QString> recoverBackup(const QString& backupName, QuaZip& zip, const QString& sourceFilename) override;
+    std::pair<bool, QString> recoverBackup(const QString& backupName, QuaZip& zip, const QString& username, const QString& sourceFilename) override;

     // Delete a skeleton backup
     void deleteBackup(const QString& backupName) override {}

@@ -94,6 +94,10 @@ ANDROID_PACKAGES = {
         'checksum': 'ddcb23df336b08017042ba4786db1d9e',
         'sharedLibFolder': 'lib',
         'includeLibs': {'libbreakpad_client.a'}
+    },
+    'webrtc': {
+        'file': 'webrtc-20190626-android.tar.gz',
+        'checksum': 'e2dccd3d8efdcba6d428c87ba7fb2a53'
     }
 }

@@ -166,16 +166,16 @@ Rectangle {
     x: 2 * margins.paddings;
     width: parent.width;
     // switch heights + 2 * top margins
-    height: (root.switchHeight) * 3 + 48;
+    height: (root.switchHeight) * 6 + 48;
     anchors.top: firstSeparator.bottom;
     anchors.topMargin: 10;

-    // mute is in its own row
     Item {
         id: switchContainer;
         x: margins.paddings;
         width: parent.width / 2;
         height: parent.height;
+        anchors.top: parent.top
         anchors.left: parent.left;
         HifiControlsUit.Switch {
             id: muteMic;

@@ -222,12 +222,29 @@ Rectangle {
         }

         HifiControlsUit.Switch {
-            id: pttSwitch
+            id: acousticEchoCancellationSwitch;
             height: root.switchHeight;
             switchWidth: root.switchWidth;
             anchors.top: noiseReductionSwitch.bottom
             anchors.topMargin: 24
             anchors.left: parent.left
+            labelTextOn: "Echo Cancellation";
+            labelTextSize: 16;
+            backgroundOnColor: "#E3E3E3";
+            checked: AudioScriptingInterface.acousticEchoCancellation;
+            onCheckedChanged: {
+                AudioScriptingInterface.acousticEchoCancellation = checked;
+                checked = Qt.binding(function() { return AudioScriptingInterface.acousticEchoCancellation; });
+            }
+        }
+
+        HifiControlsUit.Switch {
+            id: pttSwitch
+            height: root.switchHeight;
+            switchWidth: root.switchWidth;
+            anchors.top: acousticEchoCancellationSwitch.bottom;
+            anchors.topMargin: 24
+            anchors.left: parent.left
             labelTextOn: (bar.currentIndex === 0) ? qsTr("Push To Talk (T)") : qsTr("Push To Talk");
             labelTextSize: 16;
             backgroundOnColor: "#E3E3E3";

@@ -298,7 +315,6 @@ Rectangle {
                 checked = Qt.binding(function() { return AudioScriptingInterface.isStereoInput; }); // restore binding
             }
         }
-
     }
 }

@@ -222,6 +222,17 @@ Flickable {
             }
         }
     }
+
+    SimplifiedControls.Switch {
+        id: acousticEchoCancellationSwitch
+        Layout.preferredHeight: 18
+        Layout.preferredWidth: parent.width
+        labelTextOn: "Acoustic Echo Cancellation"
+        checked: AudioScriptingInterface.acousticEchoCancellation
+        onClicked: {
+            AudioScriptingInterface.acousticEchoCancellation = !AudioScriptingInterface.acousticEchoCancellation;
+        }
+    }
 }
 }

@@ -26,6 +26,7 @@ QString Audio::HMD { "VR" };

 Setting::Handle<bool> enableNoiseReductionSetting { QStringList { Audio::AUDIO, "NoiseReduction" }, true };
 Setting::Handle<bool> enableWarnWhenMutedSetting { QStringList { Audio::AUDIO, "WarnWhenMuted" }, true };
+Setting::Handle<bool> enableAcousticEchoCancellationSetting { QStringList { Audio::AUDIO, "AcousticEchoCancellation" }, true };


 float Audio::loudnessToLevel(float loudness) {

@@ -40,12 +41,14 @@ Audio::Audio() : _devices(_contextIsHMD) {
     connect(client, &AudioClient::muteToggled, this, &Audio::setMuted);
     connect(client, &AudioClient::noiseReductionChanged, this, &Audio::enableNoiseReduction);
     connect(client, &AudioClient::warnWhenMutedChanged, this, &Audio::enableWarnWhenMuted);
+    connect(client, &AudioClient::acousticEchoCancellationChanged, this, &Audio::enableAcousticEchoCancellation);
     connect(client, &AudioClient::inputLoudnessChanged, this, &Audio::onInputLoudnessChanged);
     connect(client, &AudioClient::inputVolumeChanged, this, &Audio::setInputVolume);
     connect(this, &Audio::contextChanged, &_devices, &AudioDevices::onContextChanged);
     connect(this, &Audio::pushingToTalkChanged, this, &Audio::handlePushedToTalk);
     enableNoiseReduction(enableNoiseReductionSetting.get());
     enableWarnWhenMuted(enableWarnWhenMutedSetting.get());
+    enableAcousticEchoCancellation(enableAcousticEchoCancellationSetting.get());
     onContextChanged();
 }

@@ -277,6 +280,28 @@ void Audio::enableWarnWhenMuted(bool enable) {
     }
 }

+bool Audio::acousticEchoCancellationEnabled() const {
+    return resultWithReadLock<bool>([&] {
+        return _enableAcousticEchoCancellation;
+    });
+}
+
+void Audio::enableAcousticEchoCancellation(bool enable) {
+    bool changed = false;
+    withWriteLock([&] {
+        if (_enableAcousticEchoCancellation != enable) {
+            _enableAcousticEchoCancellation = enable;
+            auto client = DependencyManager::get<AudioClient>().data();
+            QMetaObject::invokeMethod(client, "setAcousticEchoCancellation", Q_ARG(bool, enable), Q_ARG(bool, false));
+            enableAcousticEchoCancellationSetting.set(enable);
+            changed = true;
+        }
+    });
+    if (changed) {
+        emit acousticEchoCancellationChanged(enable);
+    }
+}
+
 float Audio::getInputVolume() const {
     return resultWithReadLock<bool>([&] {
         return _inputVolume;
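Note how enableAcousticEchoCancellation() above mutates state inside withWriteLock() but emits acousticEchoCancellationChanged() only after the lock is released: a directly connected slot can then call the locked getter without deadlocking. A self-contained sketch of that emit-outside-the-lock pattern (hypothetical class, not engine code):

    #include <functional>
    #include <mutex>

    class Flag {
    public:
        void set(bool enable) {
            bool changed = false;
            {
                std::lock_guard<std::mutex> guard(_mutex);
                if (_value != enable) {
                    _value = enable;
                    changed = true;
                }
            }                          // lock released before notifying
            if (changed && onChanged) {
                onChanged(enable);     // listener may safely call get()
            }
        }
        bool get() const {
            std::lock_guard<std::mutex> guard(_mutex);
            return _value;
        }
        std::function<void(bool)> onChanged;
    private:
        mutable std::mutex _mutex;
        bool _value { false };
    };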
@@ -72,6 +72,9 @@ class Audio : public AudioScriptingInterface, protected ReadWriteLockable {
  * @property {number} systemInjectorGain - The gain (relative volume) that system sounds are played at.
  * @property {number} pushingToTalkOutputGainDesktop - The gain (relative volume) that all sounds are played at when the user is holding
  *     the push-to-talk key in Desktop mode.
+ * @property {boolean} acousticEchoCancellation - <code>true</code> if audio-echo-cancellation is enabled, otherwise
+ *     <code>false</code>. When enabled, sound from the audio output will be suppressed when it echos back to the
+ *     input audio signal.
  *
  * @comment The following properties are from AudioScriptingInterface.h.
  * @property {boolean} isStereoInput - <code>true</code> if the input audio is being used in stereo, otherwise

@@ -85,6 +88,8 @@ class Audio : public AudioScriptingInterface, protected ReadWriteLockable {
     Q_PROPERTY(bool muted READ isMuted WRITE setMuted NOTIFY mutedChanged)
     Q_PROPERTY(bool noiseReduction READ noiseReductionEnabled WRITE enableNoiseReduction NOTIFY noiseReductionChanged)
     Q_PROPERTY(bool warnWhenMuted READ warnWhenMutedEnabled WRITE enableWarnWhenMuted NOTIFY warnWhenMutedChanged)
+    Q_PROPERTY(bool acousticEchoCancellation
+               READ acousticEchoCancellationEnabled WRITE enableAcousticEchoCancellation NOTIFY acousticEchoCancellationChanged)
     Q_PROPERTY(float inputVolume READ getInputVolume WRITE setInputVolume NOTIFY inputVolumeChanged)
     Q_PROPERTY(float inputLevel READ getInputLevel NOTIFY inputLevelChanged)
     Q_PROPERTY(bool clipping READ isClipping NOTIFY clippingChanged)

@@ -115,6 +120,7 @@ public:
     bool isMuted() const;
     bool noiseReductionEnabled() const;
     bool warnWhenMutedEnabled() const;
+    bool acousticEchoCancellationEnabled() const;
     float getInputVolume() const;
     float getInputLevel() const;
     bool isClipping() const;

@@ -396,6 +402,14 @@ signals:
      */
     void warnWhenMutedChanged(bool isEnabled);

+    /**jsdoc
+     * Triggered when acoustic echo cancellation is enabled or disabled.
+     * @function Audio.acousticEchoCancellationChanged
+     * @param {boolean} isEnabled - <code>true</code> if acoustic echo cancellation is enabled, otherwise <code>false</code>.
+     * @returns {Signal}
+     */
+    void acousticEchoCancellationChanged(bool isEnabled);
+
     /**jsdoc
      * Triggered when the input audio volume changes.
      * @function Audio.inputVolumeChanged

@@ -494,6 +508,7 @@ private slots:
     void setMuted(bool muted);
     void enableNoiseReduction(bool enable);
     void enableWarnWhenMuted(bool enable);
+    void enableAcousticEchoCancellation(bool enable);
     void setInputVolume(float volume);
     void onInputLoudnessChanged(float loudness, bool isClipping);


@@ -512,6 +527,7 @@ private:
     bool _isClipping { false };
     bool _enableNoiseReduction { true }; // Match default value of AudioClient::_isNoiseGateEnabled.
     bool _enableWarnWhenMuted { true };
+    bool _enableAcousticEchoCancellation { true }; // AudioClient::_isAECEnabled
     bool _contextIsHMD { false };
     AudioDevices* getDevices() { return &_devices; }
     AudioDevices _devices;

@@ -41,7 +41,6 @@
 #include "MainWindow.h"
 #include "Snapshot.h"
 #include "SnapshotUploader.h"
-#include "ToneMappingEffect.h"

 // filename format: hifi-snap-by-%username%-on-%date%_%time%_@-%location%.jpg
 // %1 <= username, %2 <= date and time, %3 <= current location

@@ -7,6 +7,11 @@ link_hifi_libraries(audio plugins)
 include_hifi_library_headers(shared)
 include_hifi_library_headers(networking)

+if (ANDROID)
+else ()
+    target_webrtc()
+endif ()
+
 # append audio includes to our list of includes to bubble
 target_include_directories(${TARGET_NAME} PUBLIC "${HIFI_LIBRARY_DIR}/audio/src")


@@ -24,7 +24,7 @@
 #endif

 #ifdef WIN32
-#define WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN 1
 #include <windows.h>
 #include <Mmsystem.h>
 #include <mmdeviceapi.h>

@@ -286,6 +286,7 @@ AudioClient::AudioClient() :
     _shouldEchoLocally(false),
     _shouldEchoToServer(false),
     _isNoiseGateEnabled(true),
+    _isAECEnabled(true),
     _reverb(false),
     _reverbOptions(&_scriptReverbOptions),
     _inputToNetworkResampler(NULL),

@@ -302,6 +303,7 @@ AudioClient::AudioClient() :
     _isHeadsetPluggedIn(false),
 #endif
     _orientationGetter(DEFAULT_ORIENTATION_GETTER) {
+
     // avoid putting a lock in the device callback
     assert(_localSamplesAvailable.is_lock_free());


@@ -353,6 +355,10 @@ AudioClient::AudioClient() :

     configureReverb();

+#if defined(WEBRTC_ENABLED)
+    configureWebrtc();
+#endif
+
     auto nodeList = DependencyManager::get<NodeList>();
     auto& packetReceiver = nodeList->getPacketReceiver();
     packetReceiver.registerListener(PacketType::AudioStreamStats, &_stats, "processStreamStatsPacket");
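The configureWebrtc() call is fenced behind WEBRTC_ENABLED so the audio client still builds where the vcpkg port is unavailable (Android, per the portfile above). How the macro gets defined is not visible in this diff; the guard itself follows the usual optional-feature shape (a sketch with hypothetical names, not the commit's code):

    // Compile the WebRTC-dependent body only when the build defines the flag;
    // other platforms get a no-op with the same signature.
    #if defined(WEBRTC_ENABLED)
    static void setupEchoCancellation() { /* touch WebRTC APIs here */ }
    #else
    static void setupEchoCancellation() { /* WebRTC absent: do nothing */ }
    #endif

    int main() {
        setupEchoCancellation();   // call sites stay unconditional
        return 0;
    }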
@@ -1084,6 +1090,131 @@ void AudioClient::setReverbOptions(const AudioEffectOptions* options) {
     }
 }

+#if defined(WEBRTC_ENABLED)
+
+static void deinterleaveToFloat(const int16_t* src, float* const* dst, int numFrames, int numChannels) {
+    for (int i = 0; i < numFrames; i++) {
+        for (int ch = 0; ch < numChannels; ch++) {
+            float f = *src++;
+            f *= (1/32768.0f);  // scale
+            dst[ch][i] = f;     // deinterleave
+        }
+    }
+}
+
+static void interleaveToInt16(const float* const* src, int16_t* dst, int numFrames, int numChannels) {
+    for (int i = 0; i < numFrames; i++) {
+        for (int ch = 0; ch < numChannels; ch++) {
+            float f = src[ch][i];
+            f *= 32768.0f;                                  // scale
+            f += (f < 0.0f) ? -0.5f : 0.5f;                 // round
+            f = std::max(std::min(f, 32767.0f), -32768.0f); // saturate
+            *dst++ = (int16_t)f;                            // interleave
+        }
+    }
+}
+
+void AudioClient::configureWebrtc() {
+    _apm = webrtc::AudioProcessingBuilder().Create();
+
+    webrtc::AudioProcessing::Config config;
+
+    config.pre_amplifier.enabled = false;
+    config.high_pass_filter.enabled = false;
+    config.echo_canceller.enabled = true;
+    config.echo_canceller.mobile_mode = false;
+    config.echo_canceller.use_legacy_aec = false;
+    config.noise_suppression.enabled = false;
+    config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kModerate;
+    config.voice_detection.enabled = false;
+    config.gain_controller1.enabled = false;
+    config.gain_controller2.enabled = false;
+    config.gain_controller2.fixed_digital.gain_db = 0.0f;
+    config.gain_controller2.adaptive_digital.enabled = false;
+    config.residual_echo_detector.enabled = true;
+    config.level_estimation.enabled = false;
+
+    _apm->ApplyConfig(config);
+}
+
+// rebuffer into 10ms chunks
+void AudioClient::processWebrtcFarEnd(const int16_t* samples, int numFrames, int numChannels, int sampleRate) {
+
+    const webrtc::StreamConfig streamConfig = webrtc::StreamConfig(sampleRate, numChannels);
+    const int numChunk = (int)streamConfig.num_frames();
+
+    if (sampleRate > WEBRTC_SAMPLE_RATE_MAX) {
+        qCWarning(audioclient) << "WebRTC does not support" << sampleRate << "output sample rate.";
+        return;
+    }
+    if (numChannels > WEBRTC_CHANNELS_MAX) {
+        qCWarning(audioclient) << "WebRTC does not support" << numChannels << "output channels.";
+        return;
+    }
+
+    while (numFrames > 0) {
+
+        // number of frames to fill
+        int numFill = std::min(numFrames, numChunk - _numFifoFarEnd);
+
+        // refill fifo
+        memcpy(&_fifoFarEnd[_numFifoFarEnd], samples, numFill * numChannels * sizeof(int16_t));
+        samples += numFill * numChannels;
+        numFrames -= numFill;
+        _numFifoFarEnd += numFill;
+
+        if (_numFifoFarEnd == numChunk) {
+
+            // convert audio format
+            float buffer[WEBRTC_CHANNELS_MAX][WEBRTC_FRAMES_MAX];
+            float* const buffers[WEBRTC_CHANNELS_MAX] = { buffer[0], buffer[1] };
+            deinterleaveToFloat(_fifoFarEnd, buffers, numChunk, numChannels);
+
+            // process one chunk
+            int error = _apm->ProcessReverseStream(buffers, streamConfig, streamConfig, buffers);
+            if (error != _apm->kNoError) {
+                qCWarning(audioclient) << "WebRTC ProcessReverseStream() returned ERROR:" << error;
+            }
+            _numFifoFarEnd = 0;
+        }
+    }
+}
+
+void AudioClient::processWebrtcNearEnd(int16_t* samples, int numFrames, int numChannels, int sampleRate) {
+
+    const webrtc::StreamConfig streamConfig = webrtc::StreamConfig(sampleRate, numChannels);
+    const int numChunk = (int)streamConfig.num_frames();
+
+    if (sampleRate > WEBRTC_SAMPLE_RATE_MAX) {
+        qCWarning(audioclient) << "WebRTC does not support" << sampleRate << "input sample rate.";
+        return;
+    }
+    if (numChannels > WEBRTC_CHANNELS_MAX) {
+        qCWarning(audioclient) << "WebRTC does not support" << numChannels << "input channels.";
+        return;
+    }
+    if (numFrames != numChunk) {
+        qCWarning(audioclient) << "WebRTC requires exactly 10ms of input.";
+        return;
+    }
+
+    // convert audio format
+    float buffer[WEBRTC_CHANNELS_MAX][WEBRTC_FRAMES_MAX];
+    float* const buffers[WEBRTC_CHANNELS_MAX] = { buffer[0], buffer[1] };
+    deinterleaveToFloat(samples, buffers, numFrames, numChannels);
+
+    // process one chunk
+    int error = _apm->ProcessStream(buffers, streamConfig, streamConfig, buffers);
+    if (error != _apm->kNoError) {
+        qCWarning(audioclient) << "WebRTC ProcessStream() returned ERROR:" << error;
+    } else {
+        // modify samples in-place
+        interleaveToInt16(buffers, samples, numFrames, numChannels);
+    }
+}
+
+#endif // WEBRTC_ENABLED
+
 void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) {
     // If there is server echo, reverb will be applied to the recieved audio stream so no need to have it here.
     bool hasReverb = _reverb || _receivedAudioStream.hasReverb();
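Note for reviewers unfamiliar with webrtc::AudioProcessing: the APM consumes deinterleaved float chunks of exactly 10 ms, while the rest of this pipeline carries interleaved int16_t at the device rate, so the helpers above scale samples into [-1, 1) on the way in, round and saturate on the way out, and the far-end path accumulates frames in a FIFO until a full chunk exists. The following standalone sketch (plain C++, no WebRTC dependency; 480 frames is used as a stand-in for StreamConfig::num_frames() at 48 kHz) demonstrates both pieces:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdio>

// Same scaling as deinterleaveToFloat()/interleaveToInt16() above:
// int16 -> float in [-1, 1), then back with round-to-nearest and saturation.
static float toFloat(int16_t s) { return s * (1 / 32768.0f); }

static int16_t toInt16(float f) {
    f *= 32768.0f;
    f += (f < 0.0f) ? -0.5f : 0.5f;                 // round
    f = std::max(std::min(f, 32767.0f), -32768.0f); // saturate
    return (int16_t)f;
}

int main() {
    // Round-trip every representable sample; the conversion is lossless.
    for (int s = -32768; s <= 32767; s++) {
        assert(toInt16(toFloat((int16_t)s)) == (int16_t)s);
    }

    // Out-of-range float input saturates instead of wrapping.
    assert(toInt16(1.5f) == 32767);
    assert(toInt16(-1.5f) == -32768);

    // The far-end FIFO logic: arbitrary-sized device callbacks are
    // accumulated until a full 10 ms chunk (e.g. 480 frames at 48 kHz)
    // is available, then the chunk is flushed to the processor.
    const int numChunk = 480;
    int numFifo = 0, chunksFlushed = 0;
    const int pending[] = { 300, 300, 300, 300, 300 }; // 1500 frames total
    for (int numFrames : pending) {
        while (numFrames > 0) {
            int numFill = std::min(numFrames, numChunk - numFifo);
            numFrames -= numFill;
            numFifo += numFill;
            if (numFifo == numChunk) { chunksFlushed++; numFifo = 0; }
        }
    }
    printf("flushed %d chunks, %d frames left in fifo\n", chunksFlushed, numFifo);
    return 0;
}

The round/saturate order matters: without the clamp, an input at or above +1.0 would overflow the int16_t cast rather than pinning to 32767.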
@@ -1262,6 +1393,13 @@ void AudioClient::handleMicAudioInput() {

         _inputRingBuffer.readSamples(inputAudioSamples.get(), inputSamplesRequired);

+#if defined(WEBRTC_ENABLED)
+        if (_isAECEnabled) {
+            processWebrtcNearEnd(inputAudioSamples.get(), inputSamplesRequired / _inputFormat.channelCount(),
+                                 _inputFormat.channelCount(), _inputFormat.sampleRate());
+        }
+#endif
+
         // detect loudness and clipping on the raw input
         bool isClipping = false;
         float loudness = computeLoudness(inputAudioSamples.get(), inputSamplesRequired, _inputFormat.channelCount(), isClipping);
@@ -1574,6 +1712,15 @@ void AudioClient::setWarnWhenMuted(bool enable, bool emitSignal) {
     }
 }

+void AudioClient::setAcousticEchoCancellation(bool enable, bool emitSignal) {
+    if (_isAECEnabled != enable) {
+        _isAECEnabled = enable;
+        if (emitSignal) {
+            emit acousticEchoCancellationChanged(_isAECEnabled);
+        }
+    }
+}
+
 bool AudioClient::setIsStereoInput(bool isStereoInput) {
     bool stereoInputChanged = false;
     if (isStereoInput != _isStereoInput && _inputDeviceInfo.supportedChannelCounts().contains(2)) {
@@ -2107,15 +2254,16 @@ qint64 AudioClient::AudioOutputIODevice::readData(char * data, qint64 maxSize) {
         return maxSize;
     }

-    // samples requested from OUTPUT_CHANNEL_COUNT
+    // max samples requested from OUTPUT_CHANNEL_COUNT
     int deviceChannelCount = _audio->_outputFormat.channelCount();
-    int samplesRequested = (int)(maxSize / AudioConstants::SAMPLE_SIZE) * OUTPUT_CHANNEL_COUNT / deviceChannelCount;
+    int maxSamplesRequested = (int)(maxSize / AudioConstants::SAMPLE_SIZE) * OUTPUT_CHANNEL_COUNT / deviceChannelCount;
     // restrict samplesRequested to the size of our mix/scratch buffers
-    samplesRequested = std::min(samplesRequested, _audio->_outputPeriod);
+    maxSamplesRequested = std::min(maxSamplesRequested, _audio->_outputPeriod);

     int16_t* scratchBuffer = _audio->_outputScratchBuffer;
     float* mixBuffer = _audio->_outputMixBuffer;

+    int samplesRequested = maxSamplesRequested;
     int networkSamplesPopped;
     if ((networkSamplesPopped = _receivedAudioStream.popSamples(samplesRequested, false)) > 0) {
         qCDebug(audiostream, "Read %d samples from buffer (%d available, %d requested)", networkSamplesPopped, _receivedAudioStream.getSamplesAvailable(), samplesRequested);
@@ -2160,45 +2308,45 @@ qint64 AudioClient::AudioOutputIODevice::readData(char * data, qint64 maxSize) {
     });

     int samplesPopped = std::max(networkSamplesPopped, injectorSamplesPopped);
-    int framesPopped = samplesPopped / AudioConstants::STEREO;
-    int bytesWritten;
-    if (samplesPopped > 0) {
-        // apply output gain
-        float newGain = _audio->_outputGain.load(std::memory_order_acquire);
-        float oldGain = _audio->_lastOutputGain;
-        _audio->_lastOutputGain = newGain;
-
-        applyGainSmoothing<OUTPUT_CHANNEL_COUNT>(mixBuffer, framesPopped, oldGain, newGain);
-
-        if (deviceChannelCount == OUTPUT_CHANNEL_COUNT) {
-            // limit the audio
-            _audio->_audioLimiter.render(mixBuffer, (int16_t*)data, framesPopped);
-        } else {
-            _audio->_audioLimiter.render(mixBuffer, scratchBuffer, framesPopped);
-
-            // upmix or downmix to deviceChannelCount
-            if (deviceChannelCount > OUTPUT_CHANNEL_COUNT) {
-                int extraChannels = deviceChannelCount - OUTPUT_CHANNEL_COUNT;
-                channelUpmix(scratchBuffer, (int16_t*)data, samplesPopped, extraChannels);
-            } else {
-                channelDownmix(scratchBuffer, (int16_t*)data, samplesPopped);
-            }
-        }
-
-        bytesWritten = framesPopped * AudioConstants::SAMPLE_SIZE * deviceChannelCount;
-        assert(bytesWritten <= maxSize);
-
-    } else {
-        // nothing on network, don't grab anything from injectors, and just return 0s
-        memset(data, 0, maxSize);
-        bytesWritten = maxSize;
+    if (samplesPopped == 0) {
+        // nothing on network, don't grab anything from injectors, and fill with silence
+        samplesPopped = maxSamplesRequested;
+        memset(mixBuffer, 0, samplesPopped * sizeof(float));
     }
+    int framesPopped = samplesPopped / OUTPUT_CHANNEL_COUNT;
+
+    // apply output gain
+    float newGain = _audio->_outputGain.load(std::memory_order_acquire);
+    float oldGain = _audio->_lastOutputGain;
+    _audio->_lastOutputGain = newGain;
+
+    applyGainSmoothing<OUTPUT_CHANNEL_COUNT>(mixBuffer, framesPopped, oldGain, newGain);
+
+    // limit the audio
+    _audio->_audioLimiter.render(mixBuffer, scratchBuffer, framesPopped);
+
+#if defined(WEBRTC_ENABLED)
+    if (_audio->_isAECEnabled) {
+        _audio->processWebrtcFarEnd(scratchBuffer, framesPopped, OUTPUT_CHANNEL_COUNT, _audio->_outputFormat.sampleRate());
+    }
+#endif
+
+    // if required, upmix or downmix to deviceChannelCount
+    if (deviceChannelCount == OUTPUT_CHANNEL_COUNT) {
+        memcpy(data, scratchBuffer, samplesPopped * AudioConstants::SAMPLE_SIZE);
+    } else if (deviceChannelCount > OUTPUT_CHANNEL_COUNT) {
+        int extraChannels = deviceChannelCount - OUTPUT_CHANNEL_COUNT;
+        channelUpmix(scratchBuffer, (int16_t*)data, samplesPopped, extraChannels);
+    } else {
+        channelDownmix(scratchBuffer, (int16_t*)data, samplesPopped);
+    }
+    int bytesWritten = framesPopped * AudioConstants::SAMPLE_SIZE * deviceChannelCount;
+    assert(bytesWritten <= maxSize);

     // send output buffer for recording
     if (_audio->_isRecording) {
         Lock lock(_recordMutex);
-        _audio->_audioFileWav.addRawAudioChunk(reinterpret_cast<char*>(scratchBuffer), bytesWritten);
+        _audio->_audioFileWav.addRawAudioChunk(data, bytesWritten);
     }

     int bytesAudioOutputUnplayed = _audio->_audioOutput->bufferSize() - _audio->_audioOutput->bytesFree();
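The reordered readData() path above now always runs the gain smoother and limiter on the stereo mix before tapping the AEC far-end reference and before any channel up/downmix, so WebRTC sees exactly what the speakers will play. applyGainSmoothing is hifi's own helper; as a rough illustration of what such a per-frame ramp does (an assumption about its internals, not a copy of it):

#include <cstdio>

// Hypothetical per-frame gain ramp over an interleaved stereo buffer,
// illustrating the kind of smoothing applyGainSmoothing<2>() performs
// (the real helper lives in hifi's audio library; this is a sketch).
static void rampGainStereo(float* buffer, int numFrames, float oldGain, float newGain) {
    for (int i = 0; i < numFrames; i++) {
        float t = (numFrames > 1) ? (float)i / (numFrames - 1) : 1.0f;
        float gain = oldGain + (newGain - oldGain) * t;  // linear crossfade
        buffer[2 * i + 0] *= gain;
        buffer[2 * i + 1] *= gain;
    }
}

int main() {
    float mix[8] = { 1, 1, 1, 1, 1, 1, 1, 1 };  // 4 stereo frames
    rampGainStereo(mix, 4, 0.0f, 1.0f);         // fade in across the block
    for (int i = 0; i < 4; i++) {
        printf("frame %d: L=%.2f R=%.2f\n", i, mix[2 * i], mix[2 * i + 1]);
    }
    return 0;
}

Ramping per frame instead of applying the new gain in one step avoids an audible click whenever the output gain changes between callbacks.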
@@ -29,6 +29,7 @@
 #include <AbstractAudioInterface.h>
 #include <AudioEffectOptions.h>
 #include <AudioStreamStats.h>
+#include <shared/WebRTC.h>

 #include <DependencyManager.h>
 #include <HifiSockAddr.h>
@@ -215,6 +216,9 @@ public slots:
     void setWarnWhenMuted(bool isNoiseGateEnabled, bool emitSignal = true);
     bool isWarnWhenMutedEnabled() const { return _warnWhenMuted; }

+    void setAcousticEchoCancellation(bool isAECEnabled, bool emitSignal = true);
+    bool isAcousticEchoCancellationEnabled() const { return _isAECEnabled; }
+
     virtual bool getLocalEcho() override { return _shouldEchoLocally; }
     virtual void setLocalEcho(bool localEcho) override { _shouldEchoLocally = localEcho; }
     virtual void toggleLocalEcho() override { _shouldEchoLocally = !_shouldEchoLocally; }
@@ -256,6 +260,7 @@ signals:
     void muteToggled(bool muted);
     void noiseReductionChanged(bool noiseReductionEnabled);
     void warnWhenMutedChanged(bool warnWhenMutedEnabled);
+    void acousticEchoCancellationChanged(bool acousticEchoCancellationEnabled);
     void mutedByMixer();
     void inputReceived(const QByteArray& inputSamples);
     void inputLoudnessChanged(float loudness, bool isClipping);
@@ -377,6 +382,7 @@ private:
     bool _shouldEchoToServer;
     bool _isNoiseGateEnabled;
     bool _warnWhenMuted;
+    bool _isAECEnabled;

     bool _reverb;
     AudioEffectOptions _scriptReverbOptions;
@@ -414,9 +420,23 @@ private:
     // Adds Reverb
     void configureReverb();
     void updateReverbOptions();

     void handleLocalEchoAndReverb(QByteArray& inputByteArray);

+#if defined(WEBRTC_ENABLED)
+    static const int WEBRTC_SAMPLE_RATE_MAX = 96000;
+    static const int WEBRTC_CHANNELS_MAX = 2;
+    static const int WEBRTC_FRAMES_MAX = webrtc::AudioProcessing::kChunkSizeMs * WEBRTC_SAMPLE_RATE_MAX / 1000;
+
+    webrtc::AudioProcessing* _apm { nullptr };
+
+    int16_t _fifoFarEnd[WEBRTC_CHANNELS_MAX * WEBRTC_FRAMES_MAX] {};
+    int _numFifoFarEnd = 0; // numFrames saved in fifo
+
+    void configureWebrtc();
+    void processWebrtcFarEnd(const int16_t* samples, int numFrames, int numChannels, int sampleRate);
+    void processWebrtcNearEnd(int16_t* samples, int numFrames, int numChannels, int sampleRate);
+#endif
+
     bool switchInputToAudioDevice(const QAudioDeviceInfo inputDeviceInfo, bool isShutdownRequest = false);
     bool switchOutputToAudioDevice(const QAudioDeviceInfo outputDeviceInfo, bool isShutdownRequest = false);
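The private section above sizes the far-end FIFO from WebRTC's fixed 10 ms chunk: at the 96 kHz ceiling that is 10 * 96000 / 1000 = 960 frames per channel, so the stereo int16 FIFO holds 1920 samples (3840 bytes). A quick standalone check of that arithmetic (kChunkSizeMs = 10 is an assumption, implied by the constant's name and WebRTC's 10 ms framing):

#include <cstdint>
#include <cstdio>

int main() {
    const int kChunkSizeMs = 10;      // assumed value of webrtc::AudioProcessing::kChunkSizeMs
    const int SAMPLE_RATE_MAX = 96000;
    const int CHANNELS_MAX = 2;

    const int FRAMES_MAX = kChunkSizeMs * SAMPLE_RATE_MAX / 1000;
    printf("frames per 10ms chunk at 96kHz: %d\n", FRAMES_MAX);               // 960
    printf("fifo samples: %d\n", CHANNELS_MAX * FRAMES_MAX);                  // 1920
    printf("fifo bytes: %zu\n", CHANNELS_MAX * FRAMES_MAX * sizeof(int16_t)); // 3840

    // Frames per chunk at common device rates; only rates <= 96 kHz are accepted.
    const int rates[] = { 16000, 24000, 44100, 48000, 96000 };
    for (int rate : rates) {
        printf("%6d Hz -> %d frames per chunk\n", rate, kChunkSizeMs * rate / 1000);
    }
    return 0;
}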
@@ -106,33 +106,33 @@ void Instance::enumerateNics() {
 }

 json Instance::getCPU(int index) {
-    assert(index <(int) _cpus.size());
+    assert(index < (int)_cpus.size());

-    if (index < 0 || (int) _cpus.size() <= index)
+    if (index < 0 || (int)_cpus.size() <= index)
         return json();

     return _cpus.at(index);
 }

 json Instance::getGPU(int index) {
-    assert(index <(int) _gpus.size());
+    assert(index < (int)_gpus.size());

-    if (index < 0 || (int) _gpus.size() <= index)
+    if (index < 0 || (int)_gpus.size() <= index)
         return json();

     return _gpus.at(index);
 }

 json Instance::getDisplay(int index) {
-    assert(index <(int) _displays.size());
+    assert(index < (int)_displays.size());

-    if (index < 0 || (int) _displays.size() <= index)
+    if (index < 0 || (int)_displays.size() <= index)
         return json();

     return _displays.at(index);
 }

 Instance::~Instance() {
     if (_cpus.size() > 0) {
         _cpus.clear();
@@ -147,7 +147,6 @@ Instance::~Instance() {
     }
 }

-
 json Instance::listAllKeys() {
     json allKeys;
     allKeys.array({{
@@ -15,7 +15,6 @@
 #include <shaders/Shaders.h>

 #include <render/BlurTask.h>
-#include <render/ResampleTask.h>
 #include "render-utils/ShaderConstants.h"

 #define BLOOM_BLUR_LEVEL_COUNT 3
@@ -148,6 +148,30 @@ void Blit::run(const RenderContextPointer& renderContext, const gpu::Framebuffer
     });
 }

+NewFramebuffer::NewFramebuffer(gpu::Element pixelFormat) {
+    _pixelFormat = pixelFormat;
+}
+
+void NewFramebuffer::run(const render::RenderContextPointer& renderContext, Output& output) {
+    RenderArgs* args = renderContext->args;
+    glm::uvec2 frameSize(args->_viewport.z, args->_viewport.w);
+    output.reset();
+
+    if (_outputFramebuffer && _outputFramebuffer->getSize() != frameSize) {
+        _outputFramebuffer.reset();
+    }
+
+    if (!_outputFramebuffer) {
+        _outputFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("newFramebuffer.out"));
+        auto colorFormat = _pixelFormat;
+        auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR);
+        auto colorTexture = gpu::Texture::createRenderBuffer(colorFormat, frameSize.x, frameSize.y, gpu::Texture::SINGLE_MIP, defaultSampler);
+        _outputFramebuffer->setRenderBuffer(0, colorTexture);
+    }
+
+    output = _outputFramebuffer;
+}
+
 void NewOrDefaultFramebuffer::run(const render::RenderContextPointer& renderContext, const Input& input, Output& output) {
     RenderArgs* args = renderContext->args;
     // auto frameSize = input;
@@ -167,7 +191,7 @@ void NewOrDefaultFramebuffer::run(const render::RenderContextPointer& renderCont
     }

     if (!_outputFramebuffer) {
-        _outputFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("newFramebuffer.out"));
+        _outputFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("newOrDefaultFramebuffer.out"));
         auto colorFormat = gpu::Element::COLOR_SRGBA_32;
         auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR);
         auto colorTexture = gpu::Texture::createRenderBuffer(colorFormat, frameSize.x, frameSize.y, gpu::Texture::SINGLE_MIP, defaultSampler);
@@ -83,6 +83,20 @@ public:
     void run(const render::RenderContextPointer& renderContext, const gpu::FramebufferPointer& srcFramebuffer);
 };

+class NewFramebuffer {
+public:
+    using Output = gpu::FramebufferPointer;
+    using JobModel = render::Job::ModelO<NewFramebuffer, Output>;
+
+    NewFramebuffer(gpu::Element pixelFormat = gpu::Element::COLOR_SRGBA_32);
+
+    void run(const render::RenderContextPointer& renderContext, Output& output);
+protected:
+    gpu::Element _pixelFormat;
+private:
+    gpu::FramebufferPointer _outputFramebuffer;
+};
+
 class NewOrDefaultFramebuffer {
 public:
     using Input = glm::uvec2;
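NewFramebuffer is a ModelO job: it takes no varying input and emits a framebuffer whose color format is fixed at construction, re-allocating only when the viewport size changes. Its intended use appears later in this changeset, where RenderForwardTask allocates the MSAA resolve target in R11G11B10 float so HDR lighting survives until tone mapping; a sketch of that wiring (a fragment inside some RenderTask::build(), where `task` and `scaledPrimaryFramebuffer` are assumed in scope):

// Allocate a resolve target in R11G11B10 float rather than SRGBA8.
const auto newResolvedFramebuffer = task.addJob<NewFramebuffer>(
    "MakeResolvingFramebuffer", gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::R11G11B10));

// Resolve the multisampled primary buffer into it.
const auto resolveInputs =
    ResolveFramebuffer::Inputs(scaledPrimaryFramebuffer, newResolvedFramebuffer).asVarying();
const auto resolvedFramebuffer = task.addJob<ResolveFramebuffer>("Resolve", resolveInputs);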
@@ -29,7 +29,6 @@
 #include <render/DrawStatus.h>
 #include <render/DrawSceneOctree.h>
 #include <render/BlurTask.h>
-#include <render/ResampleTask.h>

 #include "RenderHifi.h"
 #include "render-utils/ShaderConstants.h"
@@ -51,7 +50,7 @@

 #include "AmbientOcclusionEffect.h"
 #include "AntialiasingEffect.h"
-#include "ToneMappingEffect.h"
+#include "ToneMapAndResampleTask.h"
 #include "SubsurfaceScattering.h"
 #include "DrawHaze.h"
 #include "BloomEffect.h"
@@ -96,7 +95,7 @@ RenderDeferredTask::RenderDeferredTask()

 void RenderDeferredTask::configure(const Config& config) {
     // Propagate resolution scale to sub jobs who need it
-    auto preparePrimaryBufferConfig = config.getConfig<PreparePrimaryFramebuffer>("PreparePrimaryBuffer");
+    auto preparePrimaryBufferConfig = config.getConfig<PreparePrimaryFramebuffer>("PreparePrimaryBufferDeferred");
     assert(preparePrimaryBufferConfig);
     preparePrimaryBufferConfig->setResolutionScale(config.resolutionScale);
 }
@@ -146,7 +145,7 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
     const auto jitter = task.addJob<JitterSample>("JitterCam");

     // GPU jobs: Start preparing the primary, deferred and lighting buffer
-    const auto scaledPrimaryFramebuffer = task.addJob<PreparePrimaryFramebuffer>("PreparePrimaryBuffer");
+    const auto scaledPrimaryFramebuffer = task.addJob<PreparePrimaryFramebuffer>("PreparePrimaryBufferDeferred");

     // Prepare deferred, generate the shared Deferred Frame Transform. Only valid with the scaled frame buffer
     const auto deferredFrameTransform = task.addJob<GenerateDeferredFrameTransform>("DeferredFrameTransform", jitter);
@@ -238,23 +237,22 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
     const auto bloomInputs = BloomEffect::Inputs(deferredFrameTransform, lightingFramebuffer, bloomFrame).asVarying();
     task.addJob<BloomEffect>("Bloom", bloomInputs);

+    const auto destFramebuffer = static_cast<gpu::FramebufferPointer>(nullptr);
+
     // Lighting Buffer ready for tone mapping
-    const auto toneMappingInputs = ToneMappingDeferred::Input(lightingFramebuffer, scaledPrimaryFramebuffer).asVarying();
-    const auto toneMappedBuffer = task.addJob<ToneMappingDeferred>("ToneMapping", toneMappingInputs);
+    const auto toneMappingInputs = ToneMapAndResample::Input(lightingFramebuffer, destFramebuffer).asVarying();
+    const auto toneMappedBuffer = task.addJob<ToneMapAndResample>("ToneMapping", toneMappingInputs);

     // Debugging task is happening in the "over" layer after tone mapping and just before HUD
     { // Debug the bounds of the rendered items, still look at the zbuffer
         const auto extraDebugBuffers = RenderDeferredTaskDebug::ExtraBuffers(linearDepthTarget, surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, ambientOcclusionUniforms, scatteringResource, velocityBuffer);
         const auto debugInputs = RenderDeferredTaskDebug::Input(fetchedItems, shadowTaskOutputs, lightingStageInputs, lightClusters, prepareDeferredOutputs, extraDebugBuffers,
             deferredFrameTransform, jitter, lightingModel).asVarying();
         task.addJob<RenderDeferredTaskDebug>("DebugRenderDeferredTask", debugInputs);
     }

-    // Upscale to finale resolution
-    const auto primaryFramebuffer = task.addJob<render::UpsampleToBlitFramebuffer>("PrimaryBufferUpscale", toneMappedBuffer);
-
     // HUD Layer
-    const auto renderHUDLayerInputs = RenderHUDLayerTask::Input(primaryFramebuffer, lightingModel, hudOpaque, hudTransparent, hazeFrame).asVarying();
+    const auto renderHUDLayerInputs = RenderHUDLayerTask::Input(toneMappedBuffer, lightingModel, hudOpaque, hudTransparent, hazeFrame).asVarying();
     task.addJob<RenderHUDLayerTask>("RenderHUDLayer", renderHUDLayerInputs);
 }
@@ -415,7 +413,6 @@ void RenderDeferredTaskDebug::build(JobModel& task, const render::Varying& input

     const auto debugZoneInputs = DebugZoneLighting::Inputs(deferredFrameTransform, lightFrame, backgroundFrame).asVarying();
     task.addJob<DebugZoneLighting>("DrawZoneStack", debugZoneInputs);
-
 }
@@ -19,7 +19,6 @@
 #include <gpu/Texture.h>
 #include <graphics/ShaderConstants.h>
 #include <render/ShapePipeline.h>
-#include <render/ResampleTask.h>

 #include <render/FilterTask.h>

@@ -28,7 +27,7 @@
 #include "StencilMaskPass.h"
 #include "ZoneRenderer.h"
 #include "FadeEffect.h"
-#include "ToneMappingEffect.h"
+#include "ToneMapAndResampleTask.h"
 #include "BackgroundStage.h"
 #include "FramebufferCache.h"
 #include "TextureCache.h"
@@ -51,7 +50,7 @@ extern void initForwardPipelines(ShapePlumber& plumber);

 void RenderForwardTask::configure(const Config& config) {
     // Propagate resolution scale to sub jobs who need it
-    auto preparePrimaryBufferConfig = config.getConfig<PreparePrimaryFramebufferMSAA>("PreparePrimaryBuffer");
+    auto preparePrimaryBufferConfig = config.getConfig<PreparePrimaryFramebufferMSAA>("PreparePrimaryBufferForward");
     assert(preparePrimaryBufferConfig);
     preparePrimaryBufferConfig->setResolutionScale(config.resolutionScale);
 }
@@ -99,7 +98,7 @@ void RenderForwardTask::build(JobModel& task, const render::Varying& input, rend

     // GPU jobs: Start preparing the main framebuffer
-    const auto scaledPrimaryFramebuffer = task.addJob<PreparePrimaryFramebufferMSAA>("PreparePrimaryBuffer");
+    const auto scaledPrimaryFramebuffer = task.addJob<PreparePrimaryFramebufferMSAA>("PreparePrimaryBufferForward");

     // Prepare deferred, generate the shared Deferred Frame Transform. Only valid with the scaled frame buffer
     const auto deferredFrameTransform = task.addJob<GenerateDeferredFrameTransform>("DeferredFrameTransform");
@@ -141,34 +140,17 @@ void RenderForwardTask::build(JobModel& task, const render::Varying& input, rend
         task.addJob<DebugZoneLighting>("DrawZoneStack", debugZoneInputs);
     }

-#if defined(Q_OS_ANDROID)
-    // Just resolve the msaa
-    const auto resolveInputs = ResolveFramebuffer::Inputs(scaledPrimaryFramebuffer, static_cast<gpu::FramebufferPointer>(nullptr)).asVarying();
-    const auto resolvedFramebuffer = task.addJob<ResolveFramebuffer>("Resolve", resolveInputs);
-
-    const auto toneMappedBuffer = resolvedFramebuffer;
-#else
-    const auto newResolvedFramebuffer = task.addJob<NewOrDefaultFramebuffer>("MakeResolvingFramebuffer");
-
-    // Just resolve the msaa
+    const auto newResolvedFramebuffer = task.addJob<NewFramebuffer>("MakeResolvingFramebuffer", gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::R11G11B10));
+
     const auto resolveInputs = ResolveFramebuffer::Inputs(scaledPrimaryFramebuffer, newResolvedFramebuffer).asVarying();
     const auto resolvedFramebuffer = task.addJob<ResolveFramebuffer>("Resolve", resolveInputs);

-    // Lighting Buffer ready for tone mapping
-    // Forward rendering on GLES doesn't support tonemapping to and from the same FBO, so we specify
-    // the output FBO as null, which causes the tonemapping to target the blit framebuffer
-    const auto toneMappingInputs = ToneMappingDeferred::Input(resolvedFramebuffer, resolvedFramebuffer).asVarying();
-    const auto toneMappedBuffer = task.addJob<ToneMappingDeferred>("ToneMapping", toneMappingInputs);
-#endif
-
-    // Upscale to finale resolution
-    const auto primaryFramebuffer = task.addJob<render::UpsampleToBlitFramebuffer>("PrimaryBufferUpscale", toneMappedBuffer);
+    const auto destFramebuffer = static_cast<gpu::FramebufferPointer>(nullptr);

+    const auto toneMappingInputs = ToneMapAndResample::Input(resolvedFramebuffer, destFramebuffer).asVarying();
+    const auto toneMappedBuffer = task.addJob<ToneMapAndResample>("ToneMapping", toneMappingInputs);
     // HUD Layer
-    const auto renderHUDLayerInputs = RenderHUDLayerTask::Input(primaryFramebuffer, lightingModel, hudOpaque, hudTransparent, hazeFrame).asVarying();
+    const auto renderHUDLayerInputs = RenderHUDLayerTask::Input(toneMappedBuffer, lightingModel, hudOpaque, hudTransparent, hazeFrame).asVarying();
     task.addJob<RenderHUDLayerTask>("RenderHUDLayer", renderHUDLayerInputs);
 }
@@ -176,8 +158,8 @@ gpu::FramebufferPointer PreparePrimaryFramebufferMSAA::createFramebuffer(const c
     gpu::FramebufferPointer framebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create(name));

     auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR);

-    auto colorFormat = gpu::Element::COLOR_SRGBA_32;
+    auto colorFormat = gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::R11G11B10);
     auto colorTexture =
         gpu::Texture::createRenderBufferMultisample(colorFormat, frameSize.x, frameSize.y, numSamples, defaultSampler);
     framebuffer->setRenderBuffer(0, colorTexture);
@@ -50,11 +50,13 @@ public:
         const float SCALE_RANGE_MIN = 0.1f;
         const float SCALE_RANGE_MAX = 2.0f;
         resolutionScale = std::max(SCALE_RANGE_MIN, std::min(SCALE_RANGE_MAX, scale));
+        //emit dirty();
     }

     int getNumSamples() const { return numSamples; }
     void setNumSamples(int num) {
         numSamples = std::max(1, std::min(32, num));
+        emit dirty();
     }

 signals:
110
libraries/render-utils/src/ToneMapAndResampleTask.cpp
Normal file
@@ -0,0 +1,110 @@
+//
+//  ToneMapAndResampleTask.cpp
+//  libraries/render-utils/src
+//
+//  Created by Anna Brewer on 7/3/19.
+//  Copyright 2019 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#include "ToneMapAndResampleTask.h"
+
+#include <gpu/Context.h>
+#include <shaders/Shaders.h>
+
+#include "render-utils/ShaderConstants.h"
+#include "StencilMaskPass.h"
+#include "FramebufferCache.h"
+
+using namespace render;
+using namespace shader::gpu::program;
+using namespace shader::render_utils::program;
+
+gpu::PipelinePointer ToneMapAndResample::_pipeline;
+gpu::PipelinePointer ToneMapAndResample::_mirrorPipeline;
+
+ToneMapAndResample::ToneMapAndResample() {
+    Parameters parameters;
+    _parametersBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(Parameters), (const gpu::Byte*) &parameters));
+}
+
+void ToneMapAndResample::init() {
+    // shared_ptr to gpu::State
+    gpu::StatePointer blitState = gpu::StatePointer(new gpu::State());
+
+    blitState->setDepthTest(gpu::State::DepthTest(false, false));
+    blitState->setColorWriteMask(true, true, true, true);
+
+    _pipeline = gpu::PipelinePointer(gpu::Pipeline::create(gpu::Shader::createProgram(toneMapping), blitState));
+    _mirrorPipeline = gpu::PipelinePointer(gpu::Pipeline::create(gpu::Shader::createProgram(toneMapping_mirrored), blitState));
+}
+
+void ToneMapAndResample::setExposure(float exposure) {
+    auto& params = _parametersBuffer.get<Parameters>();
+    if (params._exposure != exposure) {
+        _parametersBuffer.edit<Parameters>()._exposure = exposure;
+        _parametersBuffer.edit<Parameters>()._twoPowExposure = pow(2.0, exposure);
+    }
+}
+
+void ToneMapAndResample::setToneCurve(ToneCurve curve) {
+    auto& params = _parametersBuffer.get<Parameters>();
+    if (params._toneCurve != (int)curve) {
+        _parametersBuffer.edit<Parameters>()._toneCurve = (int)curve;
+    }
+}
+
+void ToneMapAndResample::configure(const Config& config) {
+    setExposure(config.exposure);
+    setToneCurve((ToneCurve)config.curve);
+}
+
+void ToneMapAndResample::run(const RenderContextPointer& renderContext, const Input& input, Output& output) {
+    assert(renderContext->args);
+    assert(renderContext->args->hasViewFrustum());
+
+    RenderArgs* args = renderContext->args;
+
+    auto lightingBuffer = input.get0()->getRenderBuffer(0);
+    auto destinationFramebuffer = input.get1();
+
+    if (!destinationFramebuffer) {
+        destinationFramebuffer = args->_blitFramebuffer;
+    }
+
+    if (!lightingBuffer || !destinationFramebuffer) {
+        return;
+    }
+
+    if (!_pipeline) {
+        init();
+    }
+
+    const auto bufferSize = destinationFramebuffer->getSize();
+
+    auto srcBufferSize = glm::ivec2(lightingBuffer->getDimensions());
+
+    glm::ivec4 destViewport{ 0, 0, bufferSize.x, bufferSize.y };
+
+    gpu::doInBatch("Resample::run", args->_context, [&](gpu::Batch& batch) {
+        batch.enableStereo(false);
+        batch.setFramebuffer(destinationFramebuffer);
+
+        batch.setViewportTransform(destViewport);
+        batch.setProjectionTransform(glm::mat4());
+        batch.resetViewTransform();
+        batch.setPipeline(args->_renderMode == RenderArgs::MIRROR_RENDER_MODE ? _mirrorPipeline : _pipeline);
+
+        batch.setModelTransform(gpu::Framebuffer::evalSubregionTexcoordTransform(srcBufferSize, args->_viewport));
+        batch.setUniformBuffer(render_utils::slot::buffer::ToneMappingParams, _parametersBuffer);
+        batch.setResourceTexture(render_utils::slot::texture::ToneMappingColor, lightingBuffer);
+        batch.draw(gpu::TRIANGLE_STRIP, 4);
+    });
+
+    // Set full final viewport
+    args->_viewport = destViewport;
+
+    output = destinationFramebuffer;
+}
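The new task uploads exposure as a precomputed 2^exposure factor, and the shader applies srcColor = fragColor * getTwoPowExposure() before the selected curve (the full slf is not shown in this diff). For reference, standard formulations of the three non-trivial tone curves — a sketch of the usual math, not a verbatim copy of the shader:

#include <cmath>
#include <cstdio>

// Standard tone-curve formulations (an assumption about the shader's math).
// Input is a linear HDR channel value already scaled by 2^exposure.
static float gamma22(float c)  { return std::pow(c, 1.0f / 2.2f); }
static float reinhard(float c) { return c / (1.0f + c); }  // gamma step usually follows
static float filmic(float c) {
    // One common filmic fit (Hejl/Burgess-Dawson style), with the 1/2.2
    // gamma folded into the curve itself.
    float x = std::fmax(0.0f, c - 0.004f);
    return (x * (6.2f * x + 0.5f)) / (x * (6.2f * x + 1.7f) + 0.06f);
}

int main() {
    const float inputs[] = { 0.1f, 0.5f, 1.0f, 4.0f };
    for (float c : inputs) {
        printf("in=%.2f  gamma22=%.3f  reinhard=%.3f  filmic=%.3f\n",
               c, gamma22(c), reinhard(c), filmic(c));
    }
    return 0;
}

Reinhard and the filmic fit both compress HDR values above 1.0 into displayable range, which is why the curve runs after exposure scaling rather than before.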
@@ -1,16 +1,16 @@
 //
-//  ToneMappingEffect.h
+//  ToneMapAndResample.h
 //  libraries/render-utils/src
 //
-//  Created by Sam Gateau on 12/7/2015.
-//  Copyright 2015 High Fidelity, Inc.
+//  Created by Anna Brewer on 7/3/19.
+//  Copyright 2019 High Fidelity, Inc.
 //
 //  Distributed under the Apache License, Version 2.0.
 //  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
 //

-#ifndef hifi_ToneMappingEffect_h
-#define hifi_ToneMappingEffect_h
+#ifndef hifi_ToneMapAndResample_h
+#define hifi_ToneMapAndResample_h

 #include <DependencyManager.h>
 #include <NumericalConstants.h>
@@ -20,29 +20,66 @@
 #include <render/Forward.h>
 #include <render/DrawTask.h>

+enum class ToneCurve {
+    // Different tone curve available
+    None,
+    Gamma22,
+    Reinhard,
+    Filmic,
+};
+
+class ToneMappingConfig : public render::Job::Config {
+    Q_OBJECT
+    Q_PROPERTY(float exposure MEMBER exposure WRITE setExposure);
+    Q_PROPERTY(int curve MEMBER curve WRITE setCurve);
+
-class ToneMappingEffect {
 public:
-    ToneMappingEffect();
-    virtual ~ToneMappingEffect() {}
+    ToneMappingConfig() : render::Job::Config(true) {}

-    void render(RenderArgs* args, const gpu::TexturePointer& lightingBuffer, const gpu::FramebufferPointer& destinationBuffer);
+    void setExposure(float newExposure) { exposure = newExposure; emit dirty(); }
+    void setCurve(int newCurve) { curve = std::max((int)ToneCurve::None, std::min((int)ToneCurve::Filmic, newCurve)); emit dirty(); }
+
+    float exposure{ 0.0f };
+    int curve{ (int)ToneCurve::Gamma22 };
+
+signals:
+    void dirty();
+};
+
+class ToneMapAndResample {
+public:
+    ToneMapAndResample();
+    virtual ~ToneMapAndResample() {}
+
+    void render(RenderArgs* args, const gpu::TexturePointer& lightingBuffer, gpu::FramebufferPointer& destinationBuffer);

     void setExposure(float exposure);
     float getExposure() const { return _parametersBuffer.get<Parameters>()._exposure; }

-    // Different tone curve available
-    enum ToneCurve {
-        None = 0,
-        Gamma22,
-        Reinhard,
-        Filmic,
-    };
     void setToneCurve(ToneCurve curve);
     ToneCurve getToneCurve() const { return (ToneCurve)_parametersBuffer.get<Parameters>()._toneCurve; }

-private:
+    // Inputs: lightingFramebuffer, destinationFramebuffer
+    using Input = render::VaryingSet2<gpu::FramebufferPointer, gpu::FramebufferPointer>;
+    using Output = gpu::FramebufferPointer;
+    using Config = ToneMappingConfig;
+    using JobModel = render::Job::ModelIO<ToneMapAndResample, Input, Output, Config>;
+
+    void configure(const Config& config);
+    void run(const render::RenderContextPointer& renderContext, const Input& input, Output& output);
+
+protected:
+    static gpu::PipelinePointer _pipeline;
+    static gpu::PipelinePointer _mirrorPipeline;
+
+    gpu::FramebufferPointer _destinationFrameBuffer;
+
+    float _factor{ 2.0f };
+
+    gpu::FramebufferPointer getResampledFrameBuffer(const gpu::FramebufferPointer& sourceFramebuffer);
+
+private:
     gpu::PipelinePointer _blitLightBuffer;

     // Class describing the uniform buffer with all the parameters common to the tone mapping shaders
@@ -51,46 +88,16 @@ private:
         float _exposure = 0.0f;
         float _twoPowExposure = 1.0f;
         glm::vec2 spareA;
-        int _toneCurve = Gamma22;
+        int _toneCurve = (int)ToneCurve::Gamma22;
         glm::vec3 spareB;

         Parameters() {}
     };

     typedef gpu::BufferView UniformBufferView;
     gpu::BufferView _parametersBuffer;

-    void init(RenderArgs* args);
+    void init();
 };

-class ToneMappingConfig : public render::Job::Config {
-    Q_OBJECT
-    Q_PROPERTY(float exposure MEMBER exposure WRITE setExposure);
-    Q_PROPERTY(int curve MEMBER curve WRITE setCurve);
-
-public:
-    ToneMappingConfig() : render::Job::Config(true) {}
-
-    void setExposure(float newExposure) { exposure = newExposure; emit dirty(); }
-    void setCurve(int newCurve) { curve = std::max((int)ToneMappingEffect::None, std::min((int)ToneMappingEffect::Filmic, newCurve)); emit dirty(); }
-
-    float exposure{ 0.0f };
-    int curve{ ToneMappingEffect::Gamma22 };
-
-signals:
-    void dirty();
-};
-
-class ToneMappingDeferred {
-public:
-    // Inputs: lightingFramebuffer, destinationFramebuffer
-    using Input = render::VaryingSet2<gpu::FramebufferPointer, gpu::FramebufferPointer>;
-    using Output = gpu::FramebufferPointer;
-    using Config = ToneMappingConfig;
-    using JobModel = render::Job::ModelIO<ToneMappingDeferred, Input, Output, Config>;
-
-    void configure(const Config& config);
-    void run(const render::RenderContextPointer& renderContext, const Input& input, Output& output);
-
-    ToneMappingEffect _toneMappingEffect;
-};
-
-#endif // hifi_ToneMappingEffect_h
+#endif // hifi_ToneMapAndResample_h
@@ -1,96 +0,0 @@
-//
-//  ToneMappingEffect.cpp
-//  libraries/render-utils/src
-//
-//  Created by Sam Gateau on 12/7/2015.
-//  Copyright 2015 High Fidelity, Inc.
-//
-//  Distributed under the Apache License, Version 2.0.
-//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
-//
-
-#include "ToneMappingEffect.h"
-
-#include <gpu/Context.h>
-#include <shaders/Shaders.h>
-
-#include "render-utils/ShaderConstants.h"
-#include "StencilMaskPass.h"
-#include "FramebufferCache.h"
-
-
-ToneMappingEffect::ToneMappingEffect() {
-    Parameters parameters;
-    _parametersBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(Parameters), (const gpu::Byte*) &parameters));
-}
-
-void ToneMappingEffect::init(RenderArgs* args) {
-    auto blitProgram = gpu::Shader::createProgram(shader::render_utils::program::toneMapping);
-
-    auto blitState = std::make_shared<gpu::State>();
-    blitState->setColorWriteMask(true, true, true, true);
-    _blitLightBuffer = gpu::PipelinePointer(gpu::Pipeline::create(blitProgram, blitState));
-}
-
-void ToneMappingEffect::setExposure(float exposure) {
-    auto& params = _parametersBuffer.get<Parameters>();
-    if (params._exposure != exposure) {
-        _parametersBuffer.edit<Parameters>()._exposure = exposure;
-        _parametersBuffer.edit<Parameters>()._twoPowExposure = pow(2.0, exposure);
-    }
-}
-
-void ToneMappingEffect::setToneCurve(ToneCurve curve) {
-    auto& params = _parametersBuffer.get<Parameters>();
-    if (params._toneCurve != curve) {
-        _parametersBuffer.edit<Parameters>()._toneCurve = curve;
-    }
-}
-
-void ToneMappingEffect::render(RenderArgs* args, const gpu::TexturePointer& lightingBuffer, const gpu::FramebufferPointer& destinationFramebuffer) {
-    if (!_blitLightBuffer) {
-        init(args);
-    }
-
-    if (!lightingBuffer || !destinationFramebuffer) {
-        return;
-    }
-
-    auto framebufferSize = glm::ivec2(lightingBuffer->getDimensions());
-    gpu::doInBatch("ToneMappingEffect::render", args->_context, [&](gpu::Batch& batch) {
-        batch.enableStereo(false);
-        batch.setFramebuffer(destinationFramebuffer);
-
-        // FIXME: Generate the Luminosity map
-        //batch.generateTextureMips(lightingBuffer);
-
-        batch.setViewportTransform(args->_viewport);
-        batch.setProjectionTransform(glm::mat4());
-        batch.resetViewTransform();
-        batch.setModelTransform(gpu::Framebuffer::evalSubregionTexcoordTransform(framebufferSize, args->_viewport));
-        batch.setPipeline(_blitLightBuffer);
-
-        batch.setUniformBuffer(render_utils::slot::buffer::ToneMappingParams, _parametersBuffer);
-        batch.setResourceTexture(render_utils::slot::texture::ToneMappingColor, lightingBuffer);
-        batch.draw(gpu::TRIANGLE_STRIP, 4);
-    });
-}
-
-
-void ToneMappingDeferred::configure(const Config& config) {
-    _toneMappingEffect.setExposure(config.exposure);
-    _toneMappingEffect.setToneCurve((ToneMappingEffect::ToneCurve)config.curve);
-}
-
-void ToneMappingDeferred::run(const render::RenderContextPointer& renderContext, const Input& input, Output& output) {
-
-    auto lightingBuffer = input.get0()->getRenderBuffer(0);
-    auto destFbo = input.get1();
-
-    if (!destFbo) {
-        destFbo = renderContext->args->_blitFramebuffer;
-    }
-
-    _toneMappingEffect.render(renderContext->args, lightingBuffer, destFbo);
-    output = destFbo;
-}
@@ -1 +1,2 @@
 VERTEX gpu::vertex::DrawViewportQuadTransformTexcoord
+DEFINES mirrored:f
@@ -43,7 +43,11 @@ layout(location=0) in vec2 varTexCoord0;
 layout(location=0) out vec4 outFragColor;

 void main(void) {
+<@if HIFI_USE_MIRRORED@>
+    vec4 fragColorRaw = texture(colorMap, vec2(1.0 - varTexCoord0.x, varTexCoord0.y));
+<@else@>
     vec4 fragColorRaw = texture(colorMap, varTexCoord0);
+<@endif@>
     vec3 fragColor = fragColorRaw.xyz;

     vec3 srcColor = fragColor * getTwoPowExposure();
36
libraries/shared/src/shared/WebRTC.h
Normal file
@@ -0,0 +1,36 @@
+//
+//  WebRTC.h
+//  libraries/shared/src/shared/
+//
+//  Copyright 2019 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#ifndef hifi_WebRTC_h
+#define hifi_WebRTC_h
+
+#if defined(Q_OS_MAC)
+#  define WEBRTC_ENABLED 1
+#  define WEBRTC_POSIX 1
+#elif defined(Q_OS_WIN)
+#  define WEBRTC_ENABLED 1
+#  define WEBRTC_WIN 1
+#  define NOMINMAX 1
+#  define WIN32_LEAN_AND_MEAN 1
+#elif defined(Q_OS_ANDROID)
+// I don't yet have a working libwebrtc for android
+// #  define WEBRTC_ENABLED 1
+// #  define WEBRTC_POSIX 1
+#elif defined(Q_OS_LINUX)
+#  define WEBRTC_ENABLED 1
+#  define WEBRTC_POSIX 1
+#endif
+
+#if defined(WEBRTC_ENABLED)
+#  include <modules/audio_processing/include/audio_processing.h>
+#  include "modules/audio_processing/audio_processing_impl.h"
+#endif
+
+#endif // hifi_WebRTC_h
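Consumers never include the WebRTC headers directly; they include shared/WebRTC.h and compile the audio-processing path only where the platform defines WEBRTC_ENABLED, which is exactly how AudioClient uses it above. The guard pattern in a hypothetical consumer (a fragment, assuming hifi's include paths; processFrame is an invented name):

#include <cstdint>
#include <shared/WebRTC.h>

void processFrame(int16_t* samples, int numFrames, int numChannels, int sampleRate) {
#if defined(WEBRTC_ENABLED)
    // WebRTC types (webrtc::AudioProcessing etc.) are only visible here,
    // on platforms where the header defined WEBRTC_ENABLED.
    // ... echo-cancellation path ...
#else
    // Android (no libwebrtc yet): samples pass through untouched.
    (void)samples; (void)numFrames; (void)numChannels; (void)sampleRate;
#endif
}

Centralizing the platform defines (including NOMINMAX and WIN32_LEAN_AND_MEAN on Windows) in one header keeps every translation unit that touches WebRTC consistent.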
@@ -45,7 +45,7 @@ Rectangle {
         anchors.right: parent.right
         spacing: 5
         Repeater {
-            model: [ "MSAA:PrepareFramebuffer:numSamples:4:1"
+            model: [ "MSAA:PreparePrimaryBufferForward:numSamples:4:1"
             ]
             ConfigSlider {
                 label: qsTr(modelData.split(":")[0])
@@ -21,6 +21,7 @@
 #include <ResourceManager.h>
 #include <ResourceRequestObserver.h>
 #include <ResourceCache.h>
+#include <material-networking/MaterialCache.h>
 #include <material-networking/TextureCache.h>
 #include <hfm/ModelFormatRegistry.h>
 #include <FBXSerializer.h>
@@ -42,6 +43,7 @@ Oven::Oven() {
     DependencyManager::set<ResourceRequestObserver>();
     DependencyManager::set<ResourceCacheSharedItems>();
     DependencyManager::set<TextureCache>();
+    DependencyManager::set<MaterialCache>();

     MaterialBaker::setNextOvenWorkerThreadOperator([] {
         return Oven::instance().getNextWorkerThread();