Merge branch 'master' into bug-fix/web3doverlay-rendering

Anthony Thibault 2016-07-28 11:20:35 -07:00
commit be6e401e9a
26 changed files with 559 additions and 218 deletions

View file

@ -141,18 +141,88 @@ ScrollingWindow {
    }
    function addToWorld() {
-       var url = assetProxyModel.data(treeView.selection.currentIndex, 0x103);
-       if (!url || !canAddToWorld(url)) {
+       var defaultURL = assetProxyModel.data(treeView.selection.currentIndex, 0x103);
+       if (!defaultURL || !canAddToWorld(defaultURL)) {
            return;
        }
-       var name = assetProxyModel.data(treeView.selection.currentIndex);
-       console.log("Asset browser - adding asset " + url + " (" + name + ") to world.");
-       var addPosition = Vec3.sum(MyAvatar.position, Vec3.multiply(2, Quat.getFront(MyAvatar.orientation)));
-       Entities.addModelEntity(name, url, addPosition);
+       var SHAPE_TYPE_NONE = 0;
+       var SHAPE_TYPE_SIMPLE_HULL = 1;
+       var SHAPE_TYPE_SIMPLE_COMPOUND = 2;
+       var SHAPE_TYPE_STATIC_MESH = 3;
+       var SHAPE_TYPES = [];
+       SHAPE_TYPES[SHAPE_TYPE_NONE] = "No Collision";
+       SHAPE_TYPES[SHAPE_TYPE_SIMPLE_HULL] = "Basic - Whole model";
+       SHAPE_TYPES[SHAPE_TYPE_SIMPLE_COMPOUND] = "Good - Sub-meshes";
+       SHAPE_TYPES[SHAPE_TYPE_STATIC_MESH] = "Exact - All polygons";
+       var SHAPE_TYPE_DEFAULT = SHAPE_TYPE_STATIC_MESH;
+       var DYNAMIC_DEFAULT = false;
+       var prompt = desktop.customInputDialog({
+           textInput: {
+               label: "Model URL",
+               text: defaultURL
+           },
+           comboBox: {
+               label: "Automatic Collisions",
+               index: SHAPE_TYPE_DEFAULT,
+               items: SHAPE_TYPES
+           },
+           checkBox: {
+               label: "Dynamic",
+               checked: DYNAMIC_DEFAULT,
+               disableForItems: [
+                   SHAPE_TYPE_STATIC_MESH
+               ],
+               checkStateOnDisable: false,
+               warningOnDisable: "Models with automatic collisions set to 'Exact' cannot be dynamic"
+           }
+       });
+       prompt.selected.connect(function (jsonResult) {
+           if (jsonResult) {
+               var result = JSON.parse(jsonResult);
+               var url = result.textInput;
+               var shapeType;
+               switch (result.comboBox) {
+               case SHAPE_TYPE_SIMPLE_HULL:
+                   shapeType = "simple-hull";
+                   break;
+               case SHAPE_TYPE_SIMPLE_COMPOUND:
+                   shapeType = "simple-compound";
+                   break;
+               case SHAPE_TYPE_STATIC_MESH:
+                   shapeType = "static-mesh";
+                   break;
+               default:
+                   shapeType = "none";
+               }
+               var dynamic = result.checkBox !== null ? result.checkBox : DYNAMIC_DEFAULT;
+               if (shapeType === "static-mesh" && dynamic) {
+                   // The prompt should prevent this case
+                   print("Error: model cannot be both static mesh and dynamic. This should never happen.");
+               } else if (url) {
+                   var name = assetProxyModel.data(treeView.selection.currentIndex);
+                   var addPosition = Vec3.sum(MyAvatar.position, Vec3.multiply(2, Quat.getFront(MyAvatar.orientation)));
+                   var gravity;
+                   if (dynamic) {
+                       // Create a vector <0, -10, 0>. { x: 0, y: -10, z: 0 } won't work because Qt is dumb and this is a
+                       // different scripting engine from QTScript.
+                       gravity = Vec3.multiply(Vec3.fromPolar(Math.PI / 2, 0), 10);
+                   } else {
+                       gravity = Vec3.multiply(Vec3.fromPolar(Math.PI / 2, 0), 0);
+                   }
+                   print("Asset browser - adding asset " + url + " (" + name + ") to world.");
+                   // Entities.addEntity doesn't work from QML, so we use this.
+                   Entities.addModelEntity(name, url, shapeType, dynamic, addPosition, gravity);
+               }
+           }
+       });
    }
    function copyURLToClipboard(index) {

View file

@ -1,4 +1,4 @@
-import QtQuick 2.3
+import QtQuick 2.5
import QtQuick.Controls 1.2
import QtWebEngine 1.1
@ -16,6 +16,7 @@ ScrollingWindow {
    destroyOnHidden: true
    width: 800
    height: 600
+   property variant permissionsBar: {'securityOrigin':'none','feature':'none'}
    property alias url: webview.url
    property alias webView: webview
    x: 100
@ -33,6 +34,19 @@ ScrollingWindow {
        }
    }
+   function showPermissionsBar(){
+       permissionsContainer.visible=true;
+   }
+   function hidePermissionsBar(){
+       permissionsContainer.visible=false;
+   }
+   function allowPermissions(){
+       webview.grantFeaturePermission(permissionsBar.securityOrigin, permissionsBar.feature, true);
+       hidePermissionsBar();
+   }
    Item {
        id:item
        width: pane.contentWidth
@ -71,6 +85,7 @@ ScrollingWindow {
            size: 48
            MouseArea { anchors.fill: parent; onClicked: webview.goForward() }
        }
    }
    Item {
@ -117,6 +132,7 @@ ScrollingWindow {
                if (text.indexOf("http") != 0) {
                    text = "http://" + text
                }
+               root.hidePermissionsBar();
                webview.url = text
                break;
            }
@ -124,14 +140,76 @@ ScrollingWindow {
            }
        }
+       Rectangle {
+           id:permissionsContainer
+           visible:false
+           color: "#000000"
+           width: parent.width
+           anchors.top: buttons.bottom
+           height:40
+           z:100
+           gradient: Gradient {
+               GradientStop { position: 0.0; color: "black" }
+               GradientStop { position: 1.0; color: "grey" }
+           }
+           RalewayLight {
+               id: permissionsInfo
+               anchors.right:permissionsRow.left
+               anchors.rightMargin: 32
+               anchors.topMargin:8
+               anchors.top:parent.top
+               text: "This site wants to use your microphone/camera"
+               size: 18
+               color: hifi.colors.white
+           }
+           Row {
+               id: permissionsRow
+               spacing: 4
+               anchors.top:parent.top
+               anchors.topMargin: 8
+               anchors.right: parent.right
+               visible: true
+               z:101
+               Button {
+                   id:allow
+                   text: "Allow"
+                   color: hifi.buttons.blue
+                   colorScheme: root.colorScheme
+                   width: 120
+                   enabled: true
+                   onClicked: root.allowPermissions();
+                   z:101
+               }
+               Button {
+                   id:block
+                   text: "Block"
+                   color: hifi.buttons.red
+                   colorScheme: root.colorScheme
+                   width: 120
+                   enabled: true
+                   onClicked: root.hidePermissionsBar();
+                   z:101
+               }
+           }
+       }
        WebEngineView {
            id: webview
-           url: "http://highfidelity.com"
+           url: "https://highfidelity.com"
            anchors.top: buttons.bottom
            anchors.topMargin: 8
            anchors.bottom: parent.bottom
            anchors.left: parent.left
            anchors.right: parent.right
+           onFeaturePermissionRequested: {
+               permissionsBar.securityOrigin = securityOrigin;
+               permissionsBar.feature = feature;
+               root.showPermissionsBar();
+           }
            onLoadingChanged: {
                if (loadRequest.status === WebEngineView.LoadSucceededStatus) {
                    addressBar.text = loadRequest.url
@ -140,9 +218,12 @@ ScrollingWindow {
            onIconChanged: {
                console.log("New icon: " + icon)
            }
+           onNewViewRequested:{
+               var component = Qt.createComponent("Browser.qml");
+               var newWindow = component.createObject(desktop);
+               request.openIn(newWindow.webView)
+           }
            //profile: desktop.browserProfile
        }
    } // item
@ -158,4 +239,4 @@ ScrollingWindow {
            break;
        }
    }
} // dialog

View file

@ -15,7 +15,7 @@ WebEngineView {
    id: root
    property var newUrl;
-   profile.httpUserAgent: "Mozilla/5.0 Chrome (HighFidelityInterface)"
+   profile.httpUserAgent: "Mozilla/5.0 Chrome/38.0 (HighFidelityInterface)"
    Component.onCompleted: {
        console.log("Connecting JS messaging to Hifi Logging")
@ -48,10 +48,6 @@ WebEngineView {
        }
    }
-   onFeaturePermissionRequested: {
-       grantFeaturePermission(securityOrigin, feature, true);
-   }
    onLoadingChanged: {
        // Required to support clicking on "hifi://" links
        if (WebEngineView.LoadStartedStatus == loadRequest.status) {

View file

@ -1689,6 +1689,17 @@ void Application::paintGL() {
        renderArgs._context->syncCache();
    }
+   auto framebufferCache = DependencyManager::get<FramebufferCache>();
+   // Final framebuffer that will be handled to the display-plugin
+   auto finalFramebuffer = framebufferCache->getFramebuffer();
+   _gpuContext->beginFrame(finalFramebuffer, getHMDSensorPose());
+   // Reset the gpu::Context Stages
+   // Back to the default framebuffer;
+   gpu::doInBatch(_gpuContext, [&](gpu::Batch& batch) {
+       batch.resetStages();
+   });
    auto inputs = AvatarInputs::getInstance();
    if (inputs->mirrorVisible()) {
        PerformanceTimer perfTimer("Mirror");
@ -1711,10 +1722,6 @@ void Application::paintGL() {
        QSize size = getDeviceSize();
        renderArgs._viewport = glm::ivec4(0, 0, size.width(), size.height());
        _applicationOverlay.renderOverlay(&renderArgs);
-       auto overlayTexture = _applicationOverlay.acquireOverlay();
-       if (overlayTexture) {
-           displayPlugin->submitOverlayTexture(overlayTexture);
-       }
    }
    glm::vec3 boomOffset;
@ -1816,12 +1823,8 @@ void Application::paintGL() {
    getApplicationCompositor().setFrameInfo(_frameCount, _myCamera.getTransform());
    // Primary rendering pass
-   auto framebufferCache = DependencyManager::get<FramebufferCache>();
    const QSize size = framebufferCache->getFrameBufferSize();
-   // Final framebuffer that will be handled to the display-plugin
-   auto finalFramebuffer = framebufferCache->getFramebuffer();
    {
        PROFILE_RANGE(__FUNCTION__ "/mainRender");
        PerformanceTimer perfTimer("mainRender");
@ -1880,6 +1883,13 @@ void Application::paintGL() {
        renderArgs._context->enableStereo(false);
    }
+   _gpuContext->endFrame();
+   gpu::TexturePointer overlayTexture = _applicationOverlay.acquireOverlay();
+   if (overlayTexture) {
+       displayPlugin->submitOverlayTexture(overlayTexture);
+   }
    // deliver final composited scene to the display plugin
    {
        PROFILE_RANGE(__FUNCTION__ "/pluginOutput");
@ -1900,11 +1910,6 @@ void Application::paintGL() {
    {
        Stats::getInstance()->setRenderDetails(renderArgs._details);
-       // Reset the gpu::Context Stages
-       // Back to the default framebuffer;
-       gpu::doInBatch(renderArgs._context, [&](gpu::Batch& batch) {
-           batch.resetStages();
-       });
    }
    uint64_t lastPaintDuration = usecTimestampNow() - lastPaintBegin;
@ -4678,6 +4683,11 @@ void Application::packetSent(quint64 length) {
}
void Application::registerScriptEngineWithApplicationServices(ScriptEngine* scriptEngine) {
+   scriptEngine->setEmitScriptUpdatesFunction([this]() {
+       return isPhysicsEnabled();
+   });
    // setup the packet senders and jurisdiction listeners of the script engine's scripting interfaces so
    // we can use the same ones from the application.
    auto entityScriptingInterface = DependencyManager::get<EntityScriptingInterface>();

View file

@ -215,6 +215,7 @@ public:
    qint64 getCurrentSessionRuntime() const { return _sessionRunTimer.elapsed(); }
    bool isAboutToQuit() const { return _aboutToQuit; }
+   bool isPhysicsEnabled() const { return _physicsEnabled; }
    // the isHMDMode is true whenever we use the interface from an HMD and not a standard flat display
    // rendering of several elements depend on that

View file

@ -256,6 +256,8 @@ QByteArray AvatarData::toByteArray(bool cullSmallChanges, bool sendAll) {
        destinationBuffer += _headData->_blendshapeCoefficients.size() * sizeof(float);
    }
+   QReadLocker readLock(&_jointDataLock);
    // joint rotation data
    *destinationBuffer++ = _jointData.size();
    unsigned char* validityPosition = destinationBuffer;
@ -378,6 +380,7 @@ QByteArray AvatarData::toByteArray(bool cullSmallChanges, bool sendAll) {
void AvatarData::doneEncoding(bool cullSmallChanges) {
    // The server has finished sending this version of the joint-data to other nodes. Update _lastSentJointData.
+   QReadLocker readLock(&_jointDataLock);
    _lastSentJointData.resize(_jointData.size());
    for (int i = 0; i < _jointData.size(); i ++) {
        const JointData& data = _jointData[ i ];
@ -551,8 +554,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {
    PACKET_READ_CHECK(NumJoints, sizeof(uint8_t));
    int numJoints = *sourceBuffer++;
-   _jointData.resize(numJoints);
    const int bytesOfValidity = (int)ceil((float)numJoints / (float)BITS_IN_BYTE);
    PACKET_READ_CHECK(JointRotationValidityBits, bytesOfValidity);
@ -576,6 +577,9 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {
    }
    // each joint rotation is stored in 6 bytes.
+   QWriteLocker writeLock(&_jointDataLock);
+   _jointData.resize(numJoints);
    const int COMPRESSED_QUATERNION_SIZE = 6;
    PACKET_READ_CHECK(JointRotations, numValidJointRotations * COMPRESSED_QUATERNION_SIZE);
    for (int i = 0; i < numJoints; i++) {
@ -653,6 +657,7 @@ void AvatarData::setRawJointData(QVector<JointData> data) {
        QMetaObject::invokeMethod(this, "setRawJointData", Q_ARG(QVector<JointData>, data));
        return;
    }
+   QWriteLocker writeLock(&_jointDataLock);
    _jointData = data;
}
@ -664,6 +669,7 @@ void AvatarData::setJointData(int index, const glm::quat& rotation, const glm::v
        QMetaObject::invokeMethod(this, "setJointData", Q_ARG(int, index), Q_ARG(const glm::quat&, rotation));
        return;
    }
+   QWriteLocker writeLock(&_jointDataLock);
    if (_jointData.size() <= index) {
        _jointData.resize(index + 1);
    }
@ -682,6 +688,8 @@ void AvatarData::clearJointData(int index) {
        QMetaObject::invokeMethod(this, "clearJointData", Q_ARG(int, index));
        return;
    }
+   QWriteLocker writeLock(&_jointDataLock);
+   // FIXME: I don't understand how this "clears" the joint data at index
    if (_jointData.size() <= index) {
        _jointData.resize(index + 1);
    }
@ -710,6 +718,7 @@ glm::quat AvatarData::getJointRotation(int index) const {
            Q_RETURN_ARG(glm::quat, result), Q_ARG(int, index));
        return result;
    }
+   QReadLocker readLock(&_jointDataLock);
    return index < _jointData.size() ? _jointData.at(index).rotation : glm::quat();
}
@ -724,6 +733,7 @@ glm::vec3 AvatarData::getJointTranslation(int index) const {
            Q_RETURN_ARG(glm::vec3, result), Q_ARG(int, index));
        return result;
    }
+   QReadLocker readLock(&_jointDataLock);
    return index < _jointData.size() ? _jointData.at(index).translation : glm::vec3();
}
@ -771,6 +781,7 @@ void AvatarData::setJointRotation(int index, const glm::quat& rotation) {
        QMetaObject::invokeMethod(this, "setJointRotation", Q_ARG(int, index), Q_ARG(const glm::quat&, rotation));
        return;
    }
+   QWriteLocker writeLock(&_jointDataLock);
    if (_jointData.size() <= index) {
        _jointData.resize(index + 1);
    }
@ -787,6 +798,7 @@ void AvatarData::setJointTranslation(int index, const glm::vec3& translation) {
        QMetaObject::invokeMethod(this, "setJointTranslation", Q_ARG(int, index), Q_ARG(const glm::vec3&, translation));
        return;
    }
+   QWriteLocker writeLock(&_jointDataLock);
    if (_jointData.size() <= index) {
        _jointData.resize(index + 1);
    }
@ -831,6 +843,7 @@ QVector<glm::quat> AvatarData::getJointRotations() const {
            Q_RETURN_ARG(QVector<glm::quat>, result));
        return result;
    }
+   QReadLocker readLock(&_jointDataLock);
    QVector<glm::quat> jointRotations(_jointData.size());
    for (int i = 0; i < _jointData.size(); ++i) {
        jointRotations[i] = _jointData[i].rotation;
@ -845,6 +858,7 @@ void AvatarData::setJointRotations(QVector<glm::quat> jointRotations) {
            "setJointRotations", Qt::BlockingQueuedConnection,
            Q_ARG(QVector<glm::quat>, jointRotations));
    }
+   QWriteLocker writeLock(&_jointDataLock);
    if (_jointData.size() < jointRotations.size()) {
        _jointData.resize(jointRotations.size());
    }
@ -862,6 +876,7 @@ void AvatarData::setJointTranslations(QVector<glm::vec3> jointTranslations) {
            "setJointTranslations", Qt::BlockingQueuedConnection,
            Q_ARG(QVector<glm::vec3>, jointTranslations));
    }
+   QWriteLocker writeLock(&_jointDataLock);
    if (_jointData.size() < jointTranslations.size()) {
        _jointData.resize(jointTranslations.size());
    }
@ -873,11 +888,23 @@ void AvatarData::setJointTranslations(QVector<glm::vec3> jointTranslations) {
}
void AvatarData::clearJointsData() {
+   // FIXME: this method is terribly inefficient and probably doesn't even work
+   // (see implementation of clearJointData(index))
    for (int i = 0; i < _jointData.size(); ++i) {
        clearJointData(i);
    }
}
+int AvatarData::getJointIndex(const QString& name) const {
+   QReadLocker readLock(&_jointDataLock);
+   return _jointIndices.value(name) - 1;
+}
+QStringList AvatarData::getJointNames() const {
+   QReadLocker readLock(&_jointDataLock);
+   return _jointNames;
+}
void AvatarData::parseAvatarIdentityPacket(const QByteArray& data, Identity& identityOut) {
    QDataStream packetStream(data);
@ -1027,38 +1054,41 @@ void AvatarData::detachAll(const QString& modelURL, const QString& jointName) {
void AvatarData::setJointMappingsFromNetworkReply() {
    QNetworkReply* networkReply = static_cast<QNetworkReply*>(sender());
-   QByteArray line;
-   while (!(line = networkReply->readLine()).isEmpty()) {
-       line = line.trimmed();
-       if (line.startsWith("filename")) {
-           int filenameIndex = line.indexOf('=') + 1;
-           if (filenameIndex > 0) {
-               _skeletonFBXURL = _skeletonModelURL.resolved(QString(line.mid(filenameIndex).trimmed()));
-           }
-       }
-       if (!line.startsWith("jointIndex")) {
-           continue;
-       }
-       int jointNameIndex = line.indexOf('=') + 1;
-       if (jointNameIndex == 0) {
-           continue;
-       }
-       int secondSeparatorIndex = line.indexOf('=', jointNameIndex);
-       if (secondSeparatorIndex == -1) {
-           continue;
-       }
-       QString jointName = line.mid(jointNameIndex, secondSeparatorIndex - jointNameIndex).trimmed();
-       bool ok;
-       int jointIndex = line.mid(secondSeparatorIndex + 1).trimmed().toInt(&ok);
-       if (ok) {
-           while (_jointNames.size() < jointIndex + 1) {
-               _jointNames.append(QString());
-           }
-           _jointNames[jointIndex] = jointName;
-       }
-   }
-   for (int i = 0; i < _jointNames.size(); i++) {
-       _jointIndices.insert(_jointNames.at(i), i + 1);
-   }
+   {
+       QWriteLocker writeLock(&_jointDataLock);
+       QByteArray line;
+       while (!(line = networkReply->readLine()).isEmpty()) {
+           line = line.trimmed();
+           if (line.startsWith("filename")) {
+               int filenameIndex = line.indexOf('=') + 1;
+               if (filenameIndex > 0) {
+                   _skeletonFBXURL = _skeletonModelURL.resolved(QString(line.mid(filenameIndex).trimmed()));
+               }
+           }
+           if (!line.startsWith("jointIndex")) {
+               continue;
+           }
+           int jointNameIndex = line.indexOf('=') + 1;
+           if (jointNameIndex == 0) {
+               continue;
+           }
+           int secondSeparatorIndex = line.indexOf('=', jointNameIndex);
+           if (secondSeparatorIndex == -1) {
+               continue;
+           }
+           QString jointName = line.mid(jointNameIndex, secondSeparatorIndex - jointNameIndex).trimmed();
+           bool ok;
+           int jointIndex = line.mid(secondSeparatorIndex + 1).trimmed().toInt(&ok);
+           if (ok) {
+               while (_jointNames.size() < jointIndex + 1) {
+                   _jointNames.append(QString());
+               }
+               _jointNames[jointIndex] = jointName;
+           }
+       }
+       for (int i = 0; i < _jointNames.size(); i++) {
+           _jointIndices.insert(_jointNames.at(i), i + 1);
+       }
+   }
    networkReply->deleteLater();
@ -1101,16 +1131,19 @@ void AvatarData::sendIdentityPacket() {
}
void AvatarData::updateJointMappings() {
-   _jointIndices.clear();
-   _jointNames.clear();
-   _jointData.clear();
+   {
+       QWriteLocker writeLock(&_jointDataLock);
+       _jointIndices.clear();
+       _jointNames.clear();
+       _jointData.clear();
+   }
    if (_skeletonModelURL.fileName().toLower().endsWith(".fst")) {
        QNetworkAccessManager& networkAccessManager = NetworkAccessManager::getInstance();
        QNetworkRequest networkRequest = QNetworkRequest(_skeletonModelURL);
        networkRequest.setHeader(QNetworkRequest::UserAgentHeader, HIGH_FIDELITY_USER_AGENT);
        QNetworkReply* networkReply = networkAccessManager.get(networkRequest);
-       connect(networkReply, SIGNAL(finished()), this, SLOT(setJointMappingsFromNetworkReply()));
+       connect(networkReply, &QNetworkReply::finished, this, &AvatarData::setJointMappingsFromNetworkReply);
    }
}

View file

@ -271,9 +271,9 @@ public:
    Q_INVOKABLE virtual void clearJointsData();
    /// Returns the index of the joint with the specified name, or -1 if not found/unknown.
-   Q_INVOKABLE virtual int getJointIndex(const QString& name) const { return _jointIndices.value(name) - 1; }
-   Q_INVOKABLE virtual QStringList getJointNames() const { return _jointNames; }
+   Q_INVOKABLE virtual int getJointIndex(const QString& name) const;
+   Q_INVOKABLE virtual QStringList getJointNames() const;
    Q_INVOKABLE void setBlendshape(QString name, float val) { _headData->setBlendshape(name, val); }
@ -374,6 +374,7 @@ protected:
    QVector<JointData> _jointData; ///< the state of the skeleton joints
    QVector<JointData> _lastSentJointData; ///< the state of the skeleton joints last time we transmitted
+   mutable QReadWriteLock _jointDataLock;
    // key state
    KeyState _keyState;

View file

@ -198,12 +198,16 @@ QUuid EntityScriptingInterface::addEntity(const EntityItemProperties& properties
    }
}
-QUuid EntityScriptingInterface::addModelEntity(const QString& name, const QString& modelUrl, const glm::vec3& position) {
+QUuid EntityScriptingInterface::addModelEntity(const QString& name, const QString& modelUrl, const QString& shapeType,
+                                               bool dynamic, const glm::vec3& position, const glm::vec3& gravity) {
    EntityItemProperties properties;
    properties.setType(EntityTypes::Model);
    properties.setName(name);
    properties.setModelURL(modelUrl);
+   properties.setShapeTypeFromString(shapeType);
+   properties.setDynamic(dynamic);
    properties.setPosition(position);
+   properties.setGravity(gravity);
    return addEntity(properties);
}

View file

@ -86,7 +86,8 @@ public slots:
    Q_INVOKABLE QUuid addEntity(const EntityItemProperties& properties, bool clientOnly = false);
    /// temporary method until addEntity can be used from QJSEngine
-   Q_INVOKABLE QUuid addModelEntity(const QString& name, const QString& modelUrl, const glm::vec3& position);
+   Q_INVOKABLE QUuid addModelEntity(const QString& name, const QString& modelUrl, const QString& shapeType, bool dynamic,
+                                    const glm::vec3& position, const glm::vec3& gravity);
    /// gets the current model properties for a specific model
    /// this function will not find return results in script engine contexts which don't have access to models

View file

@ -158,69 +158,76 @@ void GLBackend::do_setStateDepthBias(Vec2 bias) {
}
void GLBackend::do_setStateDepthTest(State::DepthTest test) {
-   if (_pipeline._stateCache.depthTest != test) {
+   const auto& current = _pipeline._stateCache.depthTest;
+   if (current != test) {
        if (test.isEnabled()) {
            glEnable(GL_DEPTH_TEST);
-           glDepthMask(test.getWriteMask());
-           glDepthFunc(COMPARISON_TO_GL[test.getFunction()]);
        } else {
            glDisable(GL_DEPTH_TEST);
        }
+       if (test.getWriteMask() != current.getWriteMask()) {
+           glDepthMask(test.getWriteMask());
+       }
+       if (test.getFunction() != current.getFunction()) {
+           glDepthFunc(COMPARISON_TO_GL[test.getFunction()]);
+       }
        if (CHECK_GL_ERROR()) {
            qDebug() << "DepthTest" << (test.isEnabled() ? "Enabled" : "Disabled")
                << "Mask=" << (test.getWriteMask() ? "Write" : "no Write")
                << "Func=" << test.getFunction()
                << "Raw=" << test.getRaw();
        }
        _pipeline._stateCache.depthTest = test;
    }
}
-void GLBackend::do_setStateStencil(State::StencilActivation activation, State::StencilTest frontTest, State::StencilTest backTest) {
-   if ((_pipeline._stateCache.stencilActivation != activation)
-       || (_pipeline._stateCache.stencilTestFront != frontTest)
-       || (_pipeline._stateCache.stencilTestBack != backTest)) {
+void GLBackend::do_setStateStencil(State::StencilActivation activation, State::StencilTest testFront, State::StencilTest testBack) {
+   const auto& currentActivation = _pipeline._stateCache.stencilActivation;
+   const auto& currentTestFront = _pipeline._stateCache.stencilTestFront;
+   const auto& currentTestBack = _pipeline._stateCache.stencilTestBack;
+   if ((currentActivation != activation)
+       || (currentTestFront != testFront)
+       || (currentTestBack != testBack)) {
        if (activation.isEnabled()) {
            glEnable(GL_STENCIL_TEST);
-           if (activation.getWriteMaskFront() != activation.getWriteMaskBack()) {
-               glStencilMaskSeparate(GL_FRONT, activation.getWriteMaskFront());
-               glStencilMaskSeparate(GL_BACK, activation.getWriteMaskBack());
-           } else {
-               glStencilMask(activation.getWriteMaskFront());
-           }
-           static GLenum STENCIL_OPS[] = {
-               GL_KEEP,
-               GL_ZERO,
-               GL_REPLACE,
-               GL_INCR_WRAP,
-               GL_DECR_WRAP,
-               GL_INVERT,
-               GL_INCR,
-               GL_DECR };
-           if (frontTest != backTest) {
-               glStencilOpSeparate(GL_FRONT, STENCIL_OPS[frontTest.getFailOp()], STENCIL_OPS[frontTest.getPassOp()], STENCIL_OPS[frontTest.getDepthFailOp()]);
-               glStencilFuncSeparate(GL_FRONT, COMPARISON_TO_GL[frontTest.getFunction()], frontTest.getReference(), frontTest.getReadMask());
-               glStencilOpSeparate(GL_BACK, STENCIL_OPS[backTest.getFailOp()], STENCIL_OPS[backTest.getPassOp()], STENCIL_OPS[backTest.getDepthFailOp()]);
-               glStencilFuncSeparate(GL_BACK, COMPARISON_TO_GL[backTest.getFunction()], backTest.getReference(), backTest.getReadMask());
-           } else {
-               glStencilOp(STENCIL_OPS[frontTest.getFailOp()], STENCIL_OPS[frontTest.getPassOp()], STENCIL_OPS[frontTest.getDepthFailOp()]);
-               glStencilFunc(COMPARISON_TO_GL[frontTest.getFunction()], frontTest.getReference(), frontTest.getReadMask());
-           }
        } else {
            glDisable(GL_STENCIL_TEST);
        }
+       if (activation.getWriteMaskFront() != activation.getWriteMaskBack()) {
+           glStencilMaskSeparate(GL_FRONT, activation.getWriteMaskFront());
+           glStencilMaskSeparate(GL_BACK, activation.getWriteMaskBack());
+       } else {
+           glStencilMask(activation.getWriteMaskFront());
+       }
+       static GLenum STENCIL_OPS[State::NUM_STENCIL_OPS] = {
+           GL_KEEP,
+           GL_ZERO,
+           GL_REPLACE,
+           GL_INCR_WRAP,
+           GL_DECR_WRAP,
+           GL_INVERT,
+           GL_INCR,
+           GL_DECR };
+       if (testFront != testBack) {
+           glStencilOpSeparate(GL_FRONT, STENCIL_OPS[testFront.getFailOp()], STENCIL_OPS[testFront.getPassOp()], STENCIL_OPS[testFront.getDepthFailOp()]);
+           glStencilFuncSeparate(GL_FRONT, COMPARISON_TO_GL[testFront.getFunction()], testFront.getReference(), testFront.getReadMask());
+           glStencilOpSeparate(GL_BACK, STENCIL_OPS[testBack.getFailOp()], STENCIL_OPS[testBack.getPassOp()], STENCIL_OPS[testBack.getDepthFailOp()]);
+           glStencilFuncSeparate(GL_BACK, COMPARISON_TO_GL[testBack.getFunction()], testBack.getReference(), testBack.getReadMask());
+       } else {
+           glStencilOp(STENCIL_OPS[testFront.getFailOp()], STENCIL_OPS[testFront.getPassOp()], STENCIL_OPS[testFront.getDepthFailOp()]);
+           glStencilFunc(COMPARISON_TO_GL[testFront.getFunction()], testFront.getReference(), testFront.getReadMask());
+       }
        (void)CHECK_GL_ERROR();
        _pipeline._stateCache.stencilActivation = activation;
-       _pipeline._stateCache.stencilTestFront = frontTest;
-       _pipeline._stateCache.stencilTestBack = backTest;
+       _pipeline._stateCache.stencilTestFront = testFront;
+       _pipeline._stateCache.stencilTestBack = testBack;
    }
}

View file

@ -46,6 +46,33 @@ Batch::Batch() {
    _drawCallInfos.reserve(_drawCallInfosMax);
}
+Batch::Batch(const Batch& batch_) {
+   Batch& batch = *const_cast<Batch*>(&batch_);
+   _commands.swap(batch._commands);
+   _commandOffsets.swap(batch._commandOffsets);
+   _params.swap(batch._params);
+   _data.swap(batch._data);
+   _invalidModel = batch._invalidModel;
+   _currentModel = batch._currentModel;
+   _objects.swap(batch._objects);
+   _currentNamedCall = batch._currentNamedCall;
+   _buffers._items.swap(batch._buffers._items);
+   _textures._items.swap(batch._textures._items);
+   _streamFormats._items.swap(batch._streamFormats._items);
+   _transforms._items.swap(batch._transforms._items);
+   _pipelines._items.swap(batch._pipelines._items);
+   _framebuffers._items.swap(batch._framebuffers._items);
+   _drawCallInfos.swap(batch._drawCallInfos);
+   _queries._items.swap(batch._queries._items);
+   _lambdas._items.swap(batch._lambdas._items);
+   _profileRanges._items.swap(batch._profileRanges._items);
+   _names._items.swap(batch._names._items);
+   _namedData.swap(batch._namedData);
+   _enableStereo = batch._enableStereo;
+   _enableSkybox = batch._enableSkybox;
+}
Batch::~Batch() {
    _commandsMax = std::max(_commands.size(), _commandsMax);
    _commandOffsetsMax = std::max(_commandOffsets.size(), _commandOffsetsMax);

View file

@ -9,7 +9,7 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "Context.h"
+#include "Frame.h"
using namespace gpu;
Context::CreateBackend Context::_createBackendCallback = nullptr;
@ -20,6 +20,13 @@ Context::Context() {
    if (_createBackendCallback) {
        _backend.reset(_createBackendCallback());
    }
+   _frameHandler = [this](Frame& frame){
+       for (size_t i = 0; i < frame.batches.size(); ++i) {
+           _backend->_stereo = frame.stereoStates[i];
+           _backend->render(frame.batches[i]);
+       }
+   };
}
Context::Context(const Context& context) {
@ -28,6 +35,43 @@ Context::Context(const Context& context) {
Context::~Context() {
}
+void Context::setFrameHandler(FrameHandler handler) {
+   _frameHandler = handler;
+}
+#define DEFERRED_RENDERING
+void Context::beginFrame(const FramebufferPointer& outputFramebuffer, const glm::mat4& renderPose) {
+   _currentFrame = Frame();
+   _currentFrame.framebuffer = outputFramebuffer;
+   _currentFrame.pose = renderPose;
+   _frameActive = true;
+}
+void Context::append(Batch& batch) {
+   if (!_frameActive) {
+       qWarning() << "Batch executed outside of frame boundaries";
+   }
+#ifdef DEFERRED_RENDERING
+   _currentFrame.batches.emplace_back(batch);
+   _currentFrame.stereoStates.emplace_back(_stereo);
+#else
+   _backend->_stereo = _stereo;
+   _backend->render(batch);
+#endif
+}
+void Context::endFrame() {
+#ifdef DEFERRED_RENDERING
+   if (_frameHandler) {
+       _frameHandler(_currentFrame);
+   }
+#endif
+   _currentFrame = Frame();
+   _frameActive = false;
+}
bool Context::makeProgram(Shader& shader, const Shader::BindingSet& bindings) {
    if (shader.isProgram() && _makeProgramCallback) {
        return _makeProgramCallback(shader, bindings);
@ -35,36 +79,38 @@ bool Context::makeProgram(Shader& shader, const Shader::BindingSet& bindings) {
    return false;
}
-void Context::render(Batch& batch) {
-   PROFILE_RANGE(__FUNCTION__);
-   _backend->render(batch);
-}
void Context::enableStereo(bool enable) {
-   _backend->enableStereo(enable);
+   _stereo._enable = enable;
}
bool Context::isStereo() {
-   return _backend->isStereo();
+   return _stereo._enable;
}
void Context::setStereoProjections(const mat4 eyeProjections[2]) {
-   _backend->setStereoProjections(eyeProjections);
+   for (int i = 0; i < 2; ++i) {
+       _stereo._eyeProjections[i] = eyeProjections[i];
+   }
}
-void Context::setStereoViews(const mat4 eyeViews[2]) {
-   _backend->setStereoViews(eyeViews);
+void Context::setStereoViews(const mat4 views[2]) {
+   for (int i = 0; i < 2; ++i) {
+       _stereo._eyeViews[i] = views[i];
+   }
}
void Context::getStereoProjections(mat4* eyeProjections) const {
-   _backend->getStereoProjections(eyeProjections);
+   for (int i = 0; i < 2; ++i) {
+       eyeProjections[i] = _stereo._eyeProjections[i];
+   }
}
void Context::getStereoViews(mat4* eyeViews) const {
-   _backend->getStereoViews(eyeViews);
+   for (int i = 0; i < 2; ++i) {
+       eyeViews[i] = _stereo._eyeViews[i];
+   }
}
void Context::syncCache() {
    PROFILE_RANGE(__FUNCTION__);
    _backend->syncCache();
@ -103,12 +149,12 @@ Backend::TransformCamera Backend::TransformCamera::getEyeCamera(int eye, const S
    if (!_stereo._skybox) {
        offsetTransform.postTranslate(-Vec3(_stereo._eyeViews[eye][3]));
    } else {
        // FIXME: If "skybox" the ipd is set to 0 for now, let s try to propose a better solution for this in the future
    }
    result._projection = _stereo._eyeProjections[eye];
    result.recomputeDerived(offsetTransform);
-   result._stereoInfo = Vec4(1.0f, (float) eye, 0.0f, 0.0f);
+   result._stereoInfo = Vec4(1.0f, (float)eye, 0.0f, 0.0f);
    return result;
}
@ -125,7 +171,7 @@ std::atomic<uint32_t> Context::_textureGPUTransferCount{ 0 };
void Context::incrementBufferGPUCount() {
    _bufferGPUCount++;
}
void Context::decrementBufferGPUCount() {
    _bufferGPUCount--;
}
void Context::updateBufferGPUMemoryUsage(Size prevObjectSize, Size newObjectSize) {

View file

@ -16,12 +16,13 @@
#include <GLMHelpers.h>
+#include "Forward.h"
#include "Batch.h"
#include "Resource.h"
#include "Texture.h"
#include "Pipeline.h"
#include "Framebuffer.h"
+#include "Frame.h"
class QImage;
@ -46,51 +47,11 @@ public:
    ContextStats(const ContextStats& stats) = default;
};
-struct StereoState {
-   bool _enable{ false };
-   bool _skybox{ false };
-   // 0 for left eye, 1 for right eye
-   uint8_t _pass{ 0 };
-   mat4 _eyeViews[2];
-   mat4 _eyeProjections[2];
-};
class Backend {
public:
    virtual~ Backend() {};
    virtual void render(Batch& batch) = 0;
-   virtual void enableStereo(bool enable) {
-       _stereo._enable = enable;
-   }
-   virtual bool isStereo() {
-       return _stereo._enable;
-   }
-   void setStereoProjections(const mat4 eyeProjections[2]) {
-       for (int i = 0; i < 2; ++i) {
-           _stereo._eyeProjections[i] = eyeProjections[i];
-       }
-   }
-   void setStereoViews(const mat4 views[2]) {
-       for (int i = 0; i < 2; ++i) {
-           _stereo._eyeViews[i] = views[i];
-       }
-   }
-   void getStereoProjections(mat4* eyeProjections) const {
-       for (int i = 0; i < 2; ++i) {
-           eyeProjections[i] = _stereo._eyeProjections[i];
-       }
-   }
-   void getStereoViews(mat4* eyeViews) const {
-       for (int i = 0; i < 2; ++i) {
-           eyeViews[i] = _stereo._eyeViews[i];
-       }
-   }
    virtual void syncCache() = 0;
    virtual void downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage) = 0;
@ -137,8 +98,25 @@ public:
    static void decrementTextureGPUTransferCount();
protected:
-   StereoState _stereo;
+   virtual bool isStereo() {
+       return _stereo._enable;
+   }
+   void getStereoProjections(mat4* eyeProjections) const {
+       for (int i = 0; i < 2; ++i) {
+           eyeProjections[i] = _stereo._eyeProjections[i];
+       }
+   }
+   void getStereoViews(mat4* eyeViews) const {
+       for (int i = 0; i < 2; ++i) {
+           eyeViews[i] = _stereo._eyeViews[i];
+       }
+   }
+   friend class Context;
    ContextStats _stats;
+   StereoState _stereo;
};
class Context { class Context {
@ -161,7 +139,10 @@ public:
    Context();
    ~Context();
-   void render(Batch& batch);
+   void setFrameHandler(FrameHandler handler);
+   void beginFrame(const FramebufferPointer& outputFramebuffer, const glm::mat4& renderPose = glm::mat4());
+   void append(Batch& batch);
+   void endFrame();
    void enableStereo(bool enable = true);
    bool isStereo();
@ -191,6 +172,10 @@ protected:
    Context(const Context& context);
    std::unique_ptr<Backend> _backend;
+   bool _frameActive { false };
+   Frame _currentFrame;
+   FrameHandler _frameHandler;
+   StereoState _stereo;
    // This function can only be called by "static Shader::makeProgram()"
    // makeProgramShader(...) make a program shader ready to be used in a Batch.
@ -234,7 +219,7 @@ template<typename F>
void doInBatch(std::shared_ptr<gpu::Context> context, F f) {
    gpu::Batch batch;
    f(batch);
-   context->render(batch);
+   context->append(batch);
}
};

View file

@ -12,6 +12,7 @@
#include <stdint.h>
#include <memory>
#include <vector>
+#include <functional>
#include <glm/glm.hpp>
@ -21,6 +22,9 @@ namespace gpu {
    class Context;
    using ContextPointer = std::shared_ptr<Context>;
    class GPUObject;
+   class Frame;
+   using FramePointer = std::shared_ptr<Frame>;
+   using FrameHandler = std::function<void(Frame& frame)>;
    using Stamp = int;
    using uint32 = uint32_t;
@ -82,6 +86,15 @@ namespace gpu {
    class TextureView;
    using TextureViews = std::vector<TextureView>;
+   struct StereoState {
+       bool _enable{ false };
+       bool _skybox{ false };
+       // 0 for left eye, 1 for right eye
+       uint8 _pass{ 0 };
+       Mat4 _eyeViews[2];
+       Mat4 _eyeProjections[2];
+   };
    namespace gl {
        class GLBuffer;
    }

View file

@ -0,0 +1,29 @@
+//
+//  Created by Bradley Austin Davis on 2016/07/26
+//  Copyright 2013-2016 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+#ifndef hifi_gpu_Frame_h
+#define hifi_gpu_Frame_h
+#include "Forward.h"
+namespace gpu {
+   class Frame {
+   public:
+       /// The sensor pose used for rendering the frame, only applicable for HMDs
+       glm::mat4 pose;
+       /// The collection of batches which make up the frame
+       std::vector<Batch> batches;
+       std::vector<StereoState> stereoStates;
+       /// The destination framebuffer in which the results will be placed
+       FramebufferPointer framebuffer;
+   };
+};
+#endif

View file

@ -11,34 +11,25 @@
#include "FramebufferCache.h"
+#include <mutex>
#include <glm/glm.hpp>
-#include <gpu/Format.h>
-#include <gpu/Framebuffer.h>
-#include <QMap>
-#include <QQueue>
-#include <gpu/Batch.h>
#include "RenderUtilsLogging.h"
-static QQueue<gpu::FramebufferPointer> _cachedFramebuffers;
-FramebufferCache::FramebufferCache() {
-}
-FramebufferCache::~FramebufferCache() {
-   _cachedFramebuffers.clear();
-}
void FramebufferCache::setFrameBufferSize(QSize frameBufferSize) {
    //If the size changed, we need to delete our FBOs
    if (_frameBufferSize != frameBufferSize) {
        _frameBufferSize = frameBufferSize;
        _selfieFramebuffer.reset();
-       _cachedFramebuffers.clear();
        _occlusionFramebuffer.reset();
        _occlusionTexture.reset();
        _occlusionBlurredFramebuffer.reset();
        _occlusionBlurredTexture.reset();
+       {
+           std::unique_lock<std::mutex> lock(_mutex);
+           _cachedFramebuffers.clear();
+       }
    }
}
@ -55,8 +46,6 @@ void FramebufferCache::createPrimaryFramebuffer() {
    auto smoothSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR);
    resizeAmbientOcclusionBuffers();
}
@ -87,7 +76,8 @@ void FramebufferCache::resizeAmbientOcclusionBuffers() {
gpu::FramebufferPointer FramebufferCache::getFramebuffer() {
-   if (_cachedFramebuffers.isEmpty()) {
+   std::unique_lock<std::mutex> lock(_mutex);
+   if (_cachedFramebuffers.empty()) {
        _cachedFramebuffers.push_back(gpu::FramebufferPointer(gpu::Framebuffer::create(gpu::Element::COLOR_SRGBA_32, _frameBufferSize.width(), _frameBufferSize.height())));
    }
    gpu::FramebufferPointer result = _cachedFramebuffers.front();
@ -96,6 +86,7 @@ gpu::FramebufferPointer FramebufferCache::getFramebuffer() {
}
void FramebufferCache::releaseFramebuffer(const gpu::FramebufferPointer& framebuffer) {
+   std::unique_lock<std::mutex> lock(_mutex);
    if (QSize(framebuffer->getSize().x, framebuffer->getSize().y) == _frameBufferSize) {
        _cachedFramebuffers.push_back(framebuffer);
    }

View file

@ -11,13 +11,10 @@
#include <QSize>
-#include <gpu/Framebuffer.h>
+#include <mutex>
+#include <gpu/Forward.h>
#include <DependencyManager.h>
-namespace gpu {
-class Batch;
-}
/// Stores cached textures, including render-to-texture targets.
class FramebufferCache : public Dependency {
    SINGLETON_DEPENDENCY
@ -47,9 +44,6 @@ public:
    void releaseFramebuffer(const gpu::FramebufferPointer& framebuffer);
private:
-   FramebufferCache();
-   virtual ~FramebufferCache();
    void createPrimaryFramebuffer();
    gpu::FramebufferPointer _shadowFramebuffer;
@ -65,6 +59,9 @@ private:
    QSize _frameBufferSize{ 100, 100 };
    int _AOResolutionLevel = 1; // AO perform at half res
+   std::mutex _mutex;
+   std::list<gpu::FramebufferPointer> _cachedFramebuffers;
    // Resize/reallocate the buffers used for AO
    // the size of the AO buffers is scaled by the AOResolutionScale;
    void resizeAmbientOcclusionBuffers();

View file

@ -1192,6 +1192,8 @@ void Model::deleteGeometry() {
    _meshStates.clear();
    _rig->destroyAnimGraph();
    _blendedBlendshapeCoefficients.clear();
+   _renderGeometry.reset();
+   _collisionGeometry.reset();
}
AABox Model::getRenderableMeshBound() const {

View file

@ -347,10 +347,10 @@ void DrawOverlay3D::run(const SceneContextPointer& sceneContext, const RenderCon
    // Needs to be distinct from the other batch because using the clear call
    // while stereo is enabled triggers a warning
    if (_opaquePass) {
-       gpu::Batch batch;
+       gpu::doInBatch(args->_context, [&](gpu::Batch& batch){
            batch.enableStereo(false);
            batch.clearFramebuffer(gpu::Framebuffer::BUFFER_DEPTH, glm::vec4(), 1.f, 0, true);
-       args->_context->render(batch);
+       });
    }
    // Render the items

View file

@ -20,7 +20,7 @@ struct Grid {
};
uniform gridBuffer { Grid grid; };
-Grid getGrid() { return grid; };
+Grid getGrid() { return grid; }
in vec2 varTexCoord0;
in vec4 varColor;

View file

@ -828,24 +828,44 @@ void ScriptEngine::run() {
    _lastUpdate = usecTimestampNow();
+   std::chrono::microseconds totalUpdates;
    // TODO: Integrate this with signals/slots instead of reimplementing throttling for ScriptEngine
    while (!_isFinished) {
+       auto beforeSleep = clock::now();
        // Throttle to SCRIPT_FPS
+       // We'd like to try to keep the script at a solid SCRIPT_FPS update rate. And so we will
+       // calculate a sleepUntil to be the time from our start time until the original target
+       // sleepUntil for this frame.
        const std::chrono::microseconds FRAME_DURATION(USECS_PER_SECOND / SCRIPT_FPS + 1);
-       clock::time_point sleepTime(startTime + thisFrame++ * FRAME_DURATION);
-       std::this_thread::sleep_until(sleepTime);
+       clock::time_point sleepUntil(startTime + thisFrame++ * FRAME_DURATION);
+       // However, if our sleepUntil is not at least our average update time into the future
+       // it means our script is taking too long in it's updates, and we want to punish the
+       // script a little bit. So we will force the sleepUntil to be at least our averageUpdate
+       // time into the future.
+       auto wouldSleep = (sleepUntil - clock::now());
+       auto avgerageUpdate = totalUpdates / thisFrame;
+       if (wouldSleep < avgerageUpdate) {
+           sleepUntil = beforeSleep + avgerageUpdate;
+       }
+       std::this_thread::sleep_until(sleepUntil);
#ifdef SCRIPT_DELAY_DEBUG
        {
-           auto now = clock::now();
-           uint64_t seconds = std::chrono::duration_cast<std::chrono::seconds>(now - startTime).count();
+           auto actuallySleptUntil = clock::now();
+           uint64_t seconds = std::chrono::duration_cast<std::chrono::seconds>(actuallySleptUntil - startTime).count();
            if (seconds > 0) { // avoid division by zero and time travel
                uint64_t fps = thisFrame / seconds;
                // Overreporting artificially reduces the reported rate
                if (thisFrame % SCRIPT_FPS == 0) {
                    qCDebug(scriptengine) <<
                        "Frame:" << thisFrame <<
-                       "Slept (us):" << std::chrono::duration_cast<std::chrono::microseconds>(now - sleepTime).count() <<
+                       "Slept (us):" << std::chrono::duration_cast<std::chrono::microseconds>(actuallySleptUntil - beforeSleep).count() <<
+                       "Avg Updates (us):" << avgerageUpdate.count() <<
                        "FPS:" << fps;
                }
            }
@ -874,10 +894,14 @@ void ScriptEngine::run() {
        qint64 now = usecTimestampNow();
        // we check for 'now' in the past in case people set their clock back
-       if (_lastUpdate < now) {
+       if (_emitScriptUpdates() && _lastUpdate < now) {
            float deltaTime = (float) (now - _lastUpdate) / (float) USECS_PER_SECOND;
            if (!_isFinished) {
+               auto preUpdate = clock::now();
                emit update(deltaTime);
+               auto postUpdate = clock::now();
+               auto elapsed = (postUpdate - preUpdate);
+               totalUpdates += std::chrono::duration_cast<std::chrono::microseconds>(elapsed);
            }
        }
        _lastUpdate = now;

View file

@ -168,6 +168,8 @@ public:
    // NOTE - this is used by the TypedArray implemetation. we need to review this for thread safety
    ArrayBufferClass* getArrayBufferClass() { return _arrayBufferClass; }
+   void setEmitScriptUpdatesFunction(std::function<bool()> func) { _emitScriptUpdates = func; }
public slots:
    void callAnimationStateHandler(QScriptValue callback, AnimVariantMap parameters, QStringList names, bool useNames, AnimVariantResultHandler resultHandler);
    void updateMemoryCost(const qint64&);
@ -236,6 +238,9 @@ protected:
    QUrl currentSandboxURL {}; // The toplevel url string for the entity script that loaded the code being executed, else empty.
    void doWithEnvironment(const EntityItemID& entityID, const QUrl& sandboxURL, std::function<void()> operation);
    void callWithEnvironment(const EntityItemID& entityID, const QUrl& sandboxURL, QScriptValue function, QScriptValue thisObject, QScriptValueList args);
+   std::function<bool()> _emitScriptUpdates{ [](){ return true; } };
};
#endif // hifi_ScriptEngine_h

View file

@ -349,12 +349,8 @@ QVariant OffscreenUi::getCustomInfo(const Icon icon, const QString& title, const
    }
    QVariant result = DependencyManager::get<OffscreenUi>()->customInputDialog(icon, title, config);
-   if (result.isValid()) {
-       // We get a JSON encoded result, so we unpack it into a QVariant wrapping a QVariantMap
-       result = QVariant(QJsonDocument::fromJson(result.toString().toUtf8()).object().toVariantMap());
-       if (ok) {
-           *ok = true;
-       }
+   if (ok && result.isValid()) {
+       *ok = true;
    }
    return result;
@ -386,7 +382,13 @@ QVariant OffscreenUi::customInputDialog(const Icon icon, const QString& title, c
        return result;
    }
-   return waitForInputDialogResult(createCustomInputDialog(icon, title, config));
+   QVariant result = waitForInputDialogResult(createCustomInputDialog(icon, title, config));
+   if (result.isValid()) {
+       // We get a JSON encoded result, so we unpack it into a QVariant wrapping a QVariantMap
+       result = QVariant(QJsonDocument::fromJson(result.toString().toUtf8()).object().toVariantMap());
+   }
+   return result;
}
void OffscreenUi::togglePinned() {

View file

@ -19,6 +19,8 @@ var CONTROLLER_DEAD_SPOT = 0.25;
var TRIGGER_SMOOTH_TIMESCALE = 0.1;
var OVERLAY_RAMP_RATE = 8.0;
+var animStateHandlerID;
function clamp(val, min, max) {
    return Math.min(Math.max(val, min), max);
}
@ -33,8 +35,10 @@ function lerp(a, b, alpha) {
function init() {
    Script.update.connect(update);
-   MyAvatar.addAnimationStateHandler(animStateHandler, ["leftHandOverlayAlpha", "rightHandOverlayAlpha",
-                                                        "leftHandGraspAlpha", "rightHandGraspAlpha"]);
+   animStateHandlerID = MyAvatar.addAnimationStateHandler(
+       animStateHandler,
+       ["leftHandOverlayAlpha", "rightHandOverlayAlpha", "leftHandGraspAlpha", "rightHandGraspAlpha"]
+   );
}
function animStateHandler(props) {
@ -72,7 +76,7 @@ function update(dt) {
function shutdown() {
    Script.update.disconnect(update);
-   MyAvatar.removeAnimationStateHandler(animStateHandler);
+   MyAvatar.removeAnimationStateHandler(animStateHandlerID);
}
Script.scriptEnding.connect(shutdown);

View file

@ -403,7 +403,12 @@ private:
        renderArgs._blitFramebuffer = finalFramebuffer;
    }
+   _gpuContext->beginFrame(renderArgs._blitFramebuffer);
+   gpu::doInBatch(renderArgs._context, [&](gpu::Batch& batch) {
+       batch.resetStages();
+   });
    render(&renderArgs);
+   _gpuContext->endFrame();
    GLuint glTex;
    {
        auto gpuTex = renderArgs._blitFramebuffer->getRenderBuffer(0);
@ -428,9 +433,6 @@ private:
    _offscreenContext->makeCurrent();
    framebufferCache->releaseFramebuffer(renderArgs._blitFramebuffer);
    renderArgs._blitFramebuffer.reset();
-   gpu::doInBatch(renderArgs._context, [&](gpu::Batch& batch) {
-       batch.resetStages();
-   });
    _fpsCounter.increment();
    static size_t _frameCount { 0 };
    ++_frameCount;

View file

@ -18,11 +18,22 @@
    busy: false,
    preload: function(entityID) {
        this.entityID = entityID;
+       Entities.editEntity(entityID, {
+           userData: JSON.stringify({
+               grabbableKey: {
+                   grabbable: false,
+                   wantsTrigger: true
+               }
+           })
+       });
        Script.update.connect(this.update);
    },
    clickReleaseOnEntity: function() {
        this.createSupplies();
    },
+   startFarTrigger: function() {
+       this.createSupplies();
+   },
    update: function() {
        if (_this.busy === true) {
            return;
@ -45,7 +56,6 @@
            }, 2000)
        }
    },
    createSupplies: function() {
        var myProperties = Entities.getEntityProperties(this.entityID);