Merge branch 'master' of github.com:highfidelity/hifi into equip-via-thumb

Seth Alves 2016-03-30 16:11:35 -07:00
commit fd4b944458
163 changed files with 3483 additions and 1917 deletions

View file

@ -11,7 +11,7 @@
#include "AssignmentParentFinder.h"
SpatiallyNestableWeakPointer AssignmentParentFinder::find(QUuid parentID, bool& success) const {
SpatiallyNestableWeakPointer AssignmentParentFinder::find(QUuid parentID, bool& success, SpatialParentTree* entityTree) const {
SpatiallyNestableWeakPointer parent;
if (parentID.isNull()) {
@ -20,7 +20,11 @@ SpatiallyNestableWeakPointer AssignmentParentFinder::find(QUuid parentID, bool&
}
// search entities
parent = _tree->findEntityByEntityItemID(parentID);
if (entityTree) {
parent = entityTree->findByID(parentID);
} else {
parent = _tree->findEntityByEntityItemID(parentID);
}
if (parent.expired()) {
success = false;
} else {

View file

@ -25,7 +25,7 @@ class AssignmentParentFinder : public SpatialParentFinder {
public:
AssignmentParentFinder(EntityTreePointer tree) : _tree(tree) { }
virtual ~AssignmentParentFinder() { }
virtual SpatiallyNestableWeakPointer find(QUuid parentID, bool& success) const;
virtual SpatiallyNestableWeakPointer find(QUuid parentID, bool& success, SpatialParentTree* entityTree = nullptr) const;
protected:
EntityTreePointer _tree;

View file

@ -1484,10 +1484,10 @@ PropertiesTool = function(opts) {
selections.push(entity);
}
data.selections = selections;
webView.eventBridge.emitScriptEvent(JSON.stringify(data));
webView.emitScriptEvent(JSON.stringify(data));
});
webView.eventBridge.webEventReceived.connect(function(data) {
webView.webEventReceived.connect(function(data) {
data = JSON.parse(data);
if (data.type == "print") {
if (data.message) {
@ -1802,7 +1802,7 @@ var showMenuItem = propertyMenu.addMenuItem("Show in Marketplace");
propertiesTool = PropertiesTool();
var particleExplorerTool = ParticleExplorerTool();
var selectedParticleEntity = 0;
entityListTool.webView.eventBridge.webEventReceived.connect(function(data) {
entityListTool.webView.webEventReceived.connect(function(data) {
var data = JSON.parse(data);
if (data.type == "selectionUpdate") {
var ids = data.entityIds;
@ -1823,10 +1823,10 @@ entityListTool.webView.eventBridge.webEventReceived.connect(function(data) {
selectedParticleEntity = ids[0];
particleExplorerTool.setActiveParticleEntity(ids[0]);
particleExplorerTool.webView.eventBridge.webEventReceived.connect(function(data) {
particleExplorerTool.webView.webEventReceived.connect(function(data) {
var data = JSON.parse(data);
if (data.messageType === "page_loaded") {
particleExplorerTool.webView.eventBridge.emitScriptEvent(JSON.stringify(particleData));
particleExplorerTool.webView.emitScriptEvent(JSON.stringify(particleData));
}
});
} else {
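
The hunks above drop the intermediate eventBridge object: tool scripts now call emitScriptEvent and webEventReceived directly on the web view. A minimal sketch of the resulting call shape, assuming an OverlayWebWindow-backed view as in these tools; the page name exampleTool.html and the message fields are illustrative and not part of this commit:

var webView = new OverlayWebWindow('Example Tool', Script.resolvePath('exampleTool.html'), 320, 240, false);

// script -> page: callers serialize the payload themselves, as in the tools above
webView.emitScriptEvent(JSON.stringify({ type: 'update', selections: [] }));

// page -> script: the handler receives the raw string and parses it
webView.webEventReceived.connect(function (message) {
    var data = JSON.parse(message);
    print("Tool received event of type: " + data.type);
});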

View file

@ -0,0 +1,96 @@
(function() {
Script.include("../../libraries/virtualBaton.js");
Script.include("../../libraries/utils.js");
var _this = this;
this.startUpdate = function() {
print("EBL START UPDATE");
Entities.editEntity(_this.batonOwnerIndicator, {
visible: true
});
// Change color of box
Entities.editEntity(_this.entityID, {
color: randomColor()
});
_this.position = Entities.getEntityProperties(_this.entityID, "position").position;
_this.debugLightProperties.position = Vec3.sum(_this.position, {x: 0, y: 1, z: 0});
_this.debugLightProperties.color = randomColor();
var debugLight = Entities.addEntity(_this.debugLightProperties);
Script.setTimeout(function() {
Entities.deleteEntity(debugLight);
}, 500);
}
this.maybeClaim = function() {
print("EBL MAYBE CLAIM");
if (_this.isBatonOwner === true) {
_this.isBatonOwner = false;
}
Entities.editEntity(_this.batonOwnerIndicator, {
visible: false
});
baton.claim(_this.startUpdate, _this.maybeClaim);
}
this.unload = function() {
print("EBL UNLOAD");
baton.unload();
Entities.deleteEntity(_this.batonOwnerIndicator);
}
this.preload = function(entityID) {
print("EBL Preload!!");
_this.entityID = entityID;
_this.setupDebugEntities();
baton = virtualBaton({
batonName: "batonSimpleEntityScript:" + _this.entityID
});
_this.isBatonOwner = false;
_this.maybeClaim();
}
this.setupDebugEntities = function() {
_this.batonOwnerIndicator = Entities.addEntity({
type: "Box",
color: {
red: 200,
green: 10,
blue: 200
},
position: Vec3.sum(MyAvatar.position, {
x: 0,
y: 1,
z: 0
}),
dimensions: {
x: 0.5,
y: 1,
z: 0
},
parentID: MyAvatar.sessionUUID,
visible: false
});
}
_this.debugLightProperties = {
type: "Light",
name: "hifi-baton-light",
dimensions: {
x: 10,
y: 10,
z: 10
},
falloffRadius: 3,
intensity: 20,
}
});
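
For reference, the claim/release cycle this entity script follows, reduced to a skeleton; the callback and baton names are illustrative, while the calls themselves (virtualBaton, claim, unload) are the ones used above:

Script.include("../../libraries/virtualBaton.js");

var baton = virtualBaton({
    batonName: "batonExample:shared-resource" // one election per unique name (illustrative)
});

function onBatonGranted() {
    // this client now owns the baton; do the exclusive work here
    print("baton granted");
}

function onBatonReleased() {
    // ownership was lost (or never granted); immediately re-enter the election
    print("baton released");
    baton.claim(onBatonGranted, onBatonReleased);
}

baton.claim(onBatonGranted, onBatonReleased);

Script.scriptEnding.connect(function() {
    baton.unload(); // stop participating in the election when the script goes away
});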

View file

@ -0,0 +1,33 @@
var orientation = Camera.getOrientation();
orientation = Quat.safeEulerAngles(orientation);
orientation.x = 0;
orientation = Quat.fromVec3Degrees(orientation);
var center = Vec3.sum(MyAvatar.position, Vec3.multiply(3, Quat.getFront(orientation)));
// Math.random ensures no caching of script
var SCRIPT_URL = Script.resolvePath("batonSimpleEntityScript.js");
var batonBox = Entities.addEntity({
type: "Box",
name: "hifi-baton-entity",
color: {
red: 200,
green: 200,
blue: 200
},
position: center,
dimensions: {
x: 0.1,
y: 0.1,
z: 0.1
},
script: SCRIPT_URL
});
function cleanup() {
Entities.deleteEntity(batonBox);
}
Script.scriptEnding.connect(cleanup);
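
The "Math.random ensures no caching of script" comment above refers to a cache-busting idiom that is not actually applied in this snippet; a sketch of the idiom when it is applied, with an illustrative query-string format:

// Appending a throwaway query string forces clients to re-fetch the entity script
// instead of reusing a cached copy while iterating on it.
var SCRIPT_URL = Script.resolvePath("batonSimpleEntityScript.js") + "?cacheBust=" + Math.random();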

View file

@ -9,51 +9,11 @@
//
var EventBridge;
EventBridgeConnectionProxy = function(parent) {
this.parent = parent;
this.realSignal = this.parent.realBridge.scriptEventReceived
this.webWindowId = this.parent.webWindow.windowId;
}
EventBridgeConnectionProxy.prototype.connect = function(callback) {
var that = this;
this.realSignal.connect(function(id, message) {
if (id === that.webWindowId) { callback(message); }
});
}
EventBridgeProxy = function(webWindow) {
this.webWindow = webWindow;
this.realBridge = this.webWindow.eventBridge;
this.scriptEventReceived = new EventBridgeConnectionProxy(this);
}
EventBridgeProxy.prototype.emitWebEvent = function(data) {
this.realBridge.emitWebEvent(data);
}
var WebChannel;
openEventBridge = function(callback) {
EVENT_BRIDGE_URI = "ws://localhost:51016";
socket = new WebSocket(this.EVENT_BRIDGE_URI);
socket.onclose = function() {
console.error("web channel closed");
};
socket.onerror = function(error) {
console.error("web channel error: " + error);
};
socket.onopen = function() {
channel = new QWebChannel(socket, function(channel) {
console.log("Document url is " + document.URL);
var webWindow = channel.objects[document.URL.toLowerCase()];
console.log("WebWindow is " + webWindow)
eventBridgeProxy = new EventBridgeProxy(webWindow);
EventBridge = eventBridgeProxy;
if (callback) { callback(eventBridgeProxy); }
});
}
WebChannel = new QWebChannel(qt.webChannelTransport, function (channel) {
EventBridge = WebChannel.objects.eventBridgeWrapper.eventBridge;
callback(EventBridge);
});
}

View file

@ -4,21 +4,17 @@
<script type="text/javascript" src="jquery-2.1.4.min.js"></script>
<script type="text/javascript" src="qrc:///qtwebchannel/qwebchannel.js"></script>
<script type="text/javascript" src="eventBridgeLoader.js"></script>
<script>
var myBridge;
window.onload = function() {
openEventBridge(function(eventBridge) {
myBridge = eventBridge;
myBridge.scriptEventReceived.connect(function(message) {
openEventBridge(function() {
EventBridge.scriptEventReceived.connect(function(message) {
console.log("HTML side received message: " + message);
});
});
}
testClick = function() {
myBridge.emitWebEvent("HTML side sending message - button click");
EventBridge.emitWebEvent(["Foo", "Bar", { "baz": 1} ]);
}
</script>
</head>

View file

@ -38,14 +38,14 @@ EntityListTool = function(opts) {
type: 'selectionUpdate',
selectedIDs: selectedIDs,
};
webView.eventBridge.emitScriptEvent(JSON.stringify(data));
webView.emitScriptEvent(JSON.stringify(data));
});
that.clearEntityList = function () {
var data = {
type: 'clearEntityList'
}
webView.eventBridge.emitScriptEvent(JSON.stringify(data));
webView.emitScriptEvent(JSON.stringify(data));
};
that.sendUpdate = function() {
@ -72,11 +72,11 @@ EntityListTool = function(opts) {
entities: entities,
selectedIDs: selectedIDs,
};
webView.eventBridge.emitScriptEvent(JSON.stringify(data));
webView.emitScriptEvent(JSON.stringify(data));
}
webView.eventBridge.webEventReceived.connect(function(data) {
webView.webEventReceived.connect(function(data) {
data = JSON.parse(data);
if (data.type == "selectionUpdate") {
var ids = data.entityIds;

View file

@ -234,11 +234,11 @@ GridTool = function(opts) {
});
horizontalGrid.addListener(function(data) {
webView.eventBridge.emitScriptEvent(JSON.stringify(data));
webView.emitScriptEvent(JSON.stringify(data));
selectionDisplay.updateHandles();
});
webView.eventBridge.webEventReceived.connect(function(data) {
webView.webEventReceived.connect(function(data) {
data = JSON.parse(data);
if (data.type == "init") {
horizontalGrid.emitUpdate();

View file

@ -28,7 +28,6 @@ colorMix = function(colorA, colorB, mix) {
}
return result;
}
scaleLine = function (start, end, scale) {
var v = Vec3.subtract(end, start);
var length = Vec3.length(v);
@ -262,6 +261,16 @@ randInt = function(low, high) {
return Math.floor(randFloat(low, high));
}
randomColor = function() {
return {
red: randInt(0, 255),
green: randInt(0, 255),
blue: randInt(0, 255)
}
}
hexToRgb = function(hex) {
var result = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex);
return result ? {

View file

@ -26,7 +26,7 @@ ParticleExplorerTool = function() {
});
that.webView.setVisible(true);
that.webView.eventBridge.webEventReceived.connect(that.webEventReceived);
that.webView.webEventReceived.connect(that.webEventReceived);
}

View file

@ -120,21 +120,25 @@ function menuItemEvent(menuItem) {
if (menuItem.endsWith(" for Output")) {
var selectedDevice = menuItem.trimStartsWith("Use ").trimEndsWith(" for Output");
print("output audio selection..." + selectedDevice);
Menu.menuItemEvent.disconnect(menuItemEvent);
Menu.setIsOptionChecked(selectedOutputMenu, false);
selectedOutputMenu = menuItem;
Menu.setIsOptionChecked(selectedOutputMenu, true);
if (AudioDevice.setOutputDevice(selectedDevice)) {
Settings.setValue(OUTPUT_DEVICE_SETTING, selectedDevice);
}
Menu.menuItemEvent.connect(menuItemEvent);
} else if (menuItem.endsWith(" for Input")) {
var selectedDevice = menuItem.trimStartsWith("Use ").trimEndsWith(" for Input");
print("input audio selection..." + selectedDevice);
Menu.menuItemEvent.disconnect(menuItemEvent);
Menu.setIsOptionChecked(selectedInputMenu, false);
selectedInputMenu = menuItem;
Menu.setIsOptionChecked(selectedInputMenu, true);
if (AudioDevice.setInputDevice(selectedDevice)) {
Settings.setValue(INPUT_DEVICE_SETTING, selectedDevice);
}
Menu.menuItemEvent.connect(menuItemEvent);
}
}
}
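
The disconnect/connect pairs added above keep menuItemEvent from firing recursively while the handler itself toggles the checkmarks; the general shape of that guard, with an illustrative handler name:

function onMenuItem(menuItem) {
    // temporarily detach so our own setIsOptionChecked calls do not re-enter this handler
    Menu.menuItemEvent.disconnect(onMenuItem);
    Menu.setIsOptionChecked(menuItem, true);
    Menu.menuItemEvent.connect(onMenuItem);
}
Menu.menuItemEvent.connect(onMenuItem);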

View file

@ -0,0 +1,31 @@
var orientation = Camera.getOrientation();
orientation = Quat.safeEulerAngles(orientation);
orientation.x = 0;
orientation = Quat.fromVec3Degrees(orientation);
var center = Vec3.sum(MyAvatar.position, Vec3.multiply(3, Quat.getFront(orientation)));
// Math.random ensures no caching of script
var SCRIPT_URL = Script.resolvePath("myEntityScript.js")
var myEntity = Entities.addEntity({
type: "Sphere",
color: {
red: 200,
green: 10,
blue: 200
},
position: center,
dimensions: {
x: 1,
y: 1,
z: 1
},
script: SCRIPT_URL
})
function cleanup() {
// Entities.deleteEntity(myEntity);
}
Script.scriptEnding.connect(cleanup);

View file

@ -0,0 +1,24 @@
(function() {
var _this;
MyEntity = function() {
_this = this;
};
MyEntity.prototype = {
preload: function(entityID) {
this.entityID = entityID;
var randNum = Math.random().toFixed(3);
print("EBL PRELOAD ENTITY SCRIPT!!!", randNum)
},
};
// entity scripts always need to return a newly constructed object of our type
return new MyEntity();
});

View file

@ -0,0 +1,85 @@
(function() {
Script.include("../../libraries/virtualBaton.js");
var baton;
var _this;
BatonSoundEntity = function() {
_this = this;
_this.drumSound = SoundCache.getSound("https://s3.amazonaws.com/hifi-public/sounds/Drums/deepdrum1.wav");
_this.injectorOptions = {position: MyAvatar.position, loop: false, volume: 1};
_this.soundIntervalConnected = false;
_this.batonDebugModel = Entities.addEntity({
type: "Box",
color: {red: 200, green: 10, blue: 200},
position: Vec3.sum(MyAvatar.position, {x: 0, y: 1, z: 0}),
dimensions: {x: 0.5, y: 1, z: 0},
parentID: MyAvatar.sessionUUID,
visible: false
});
};
function startUpdate() {
// We are claiming the baton! So start our clip
if (!_this.soundInjector) {
// This client hasn't created their injector yet so create one
_this.soundInjector = Audio.playSound(_this.drumSound, _this.injectorOptions);
} else {
// We already have our injector so just restart it
_this.soundInjector.restart();
}
print("EBL START UPDATE");
Entities.editEntity(_this.batonDebugModel, {visible: true});
_this.playSoundInterval = Script.setInterval(function() {
_this.soundInjector.restart();
}, _this.drumSound.duration * 1000); // Duration is in seconds so convert to ms
_this.soundIntervalConnected = true;
}
function stopUpdateAndReclaim() {
print("EBL STOP UPDATE AND RECLAIM")
// when the baton is released
if (_this.soundIntervalConnected === true) {
Script.clearInterval(_this.playSoundInterval);
_this.soundIntervalConnected = false;
print("EBL CLEAR INTERVAL")
}
Entities.editEntity(_this.batonDebugModel, {visible: false});
// hook up callbacks to the baton
baton.claim(startUpdate, stopUpdateAndReclaim);
}
BatonSoundEntity.prototype = {
preload: function(entityID) {
_this.entityID = entityID;
print("EBL PRELOAD ENTITY SCRIPT!!!");
baton = virtualBaton({
// One winner for each entity
batonName: "io.highfidelity.soundEntityBatonTest:" + _this.entityID,
// debugFlow: true
});
stopUpdateAndReclaim();
},
unload: function() {
print("EBL UNLOAD");
// baton.release();
baton.unload();
Entities.deleteEntity(_this.batonDebugModel);
if (_this.soundIntervalConnected === true) {
Script.clearInterval(_this.playSoundInterval);
_this.soundIntervalConnected = false;
_this.soundInjector.stop();
delete _this.soundInjector;
}
}
};
// entity scripts always need to return a newly constructed object of our type
return new BatonSoundEntity();
});

View file

@ -0,0 +1,31 @@
var orientation = Camera.getOrientation();
orientation = Quat.safeEulerAngles(orientation);
orientation.x = 0;
orientation = Quat.fromVec3Degrees(orientation);
var center = Vec3.sum(MyAvatar.position, Vec3.multiply(3, Quat.getFront(orientation)));
// Math.random ensures no caching of script
var SCRIPT_URL = Script.resolvePath("batonSoundTestEntityScript.js")
var soundEntity = Entities.addEntity({
type: "Box",
color: {
red: 200,
green: 10,
blue: 10
},
position: center,
dimensions: {
x: 0.1,
y: 0.1,
z: 0.1
},
script: SCRIPT_URL
});
function cleanup() {
// Entities.deleteEntity(soundEntity);
}
Script.scriptEnding.connect(cleanup);

View file

@ -2,32 +2,18 @@ print("Launching web window");
var htmlUrl = Script.resolvePath("..//html/qmlWebTest.html")
webWindow = new OverlayWebWindow('Test Event Bridge', htmlUrl, 320, 240, false);
print("JS Side window: " + webWindow);
print("JS Side bridge: " + webWindow.eventBridge);
webWindow.eventBridge.webEventReceived.connect(function(data) {
webWindow.webEventReceived.connect(function(data) {
print("JS Side event received: " + data);
});
var titles = ["A", "B", "C"];
var titleIndex = 0;
Script.setInterval(function() {
webWindow.eventBridge.emitScriptEvent("JS Event sent");
var size = webWindow.size;
var position = webWindow.position;
print("Window url: " + webWindow.url)
print("Window visible: " + webWindow.visible)
print("Window size: " + size.x + "x" + size.y)
print("Window pos: " + position.x + "x" + position.y)
webWindow.setVisible(!webWindow.visible);
webWindow.setTitle(titles[titleIndex]);
webWindow.setSize(320 + Math.random() * 100, 240 + Math.random() * 100);
titleIndex += 1;
titleIndex %= titles.length;
}, 2 * 1000);
var message = [ Math.random(), Math.random() ];
print("JS Side sending: " + message);
webWindow.emitScriptEvent(message);
}, 5 * 1000);
Script.setTimeout(function() {
print("Closing script");
Script.scriptEnding.connect(function(){
webWindow.close();
Script.stop();
}, 15 * 1000)
webWindow.deleteLater();
});

View file

@ -0,0 +1 @@
ConfigSlider 1.0 ConfigSlider.qml

View file

@ -0,0 +1,114 @@
//
// culling.qml
// examples/utilities/render
//
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or https://www.apache.org/licenses/LICENSE-2.0.html
//
import QtQuick 2.5
import QtQuick.Controls 1.4
import "configSlider"
Column {
id: root
spacing: 8
property var sceneOctree: Render.getConfig("DrawSceneOctree");
property var itemSelection: Render.getConfig("DrawItemSelection");
Component.onCompleted: {
sceneOctree.enabled = true;
itemSelection.enabled = true;
sceneOctree.showVisibleCells = false;
sceneOctree.showEmptyCells = false;
itemSelection.showInsideItems = false;
itemSelection.showInsideSubcellItems = false;
itemSelection.showPartialItems = false;
itemSelection.showPartialSubcellItems = false;
}
Component.onDestruction: {
sceneOctree.enabled = false;
itemSelection.enabled = false;
Render.getConfig("FetchSceneSelection").freezeFrustum = false;
Render.getConfig("CullSceneSelection").freezeFrustum = false;
}
GroupBox {
title: "Culling"
Row {
spacing: 8
Column {
spacing: 8
CheckBox {
text: "Freeze Culling Frustum"
checked: false
onCheckedChanged: {
Render.getConfig("FetchSceneSelection").freezeFrustum = checked;
Render.getConfig("CullSceneSelection").freezeFrustum = checked;
}
}
Label {
text: "Octree"
}
CheckBox {
text: "Visible Cells"
checked: root.sceneOctree.showVisibleCells
onCheckedChanged: { root.sceneOctree.showVisibleCells = checked }
}
CheckBox {
text: "Empty Cells"
checked: false
onCheckedChanged: { root.sceneOctree.showEmptyCells = checked }
}
}
Column {
spacing: 8
Label {
text: "Frustum Items"
}
CheckBox {
text: "Inside Items"
checked: false
onCheckedChanged: { root.itemSelection.showInsideItems = checked }
}
CheckBox {
text: "Inside Sub-cell Items"
checked: false
onCheckedChanged: { root.itemSelection.showInsideSubcellItems = checked }
}
CheckBox {
text: "Partial Items"
checked: false
onCheckedChanged: { root.itemSelection.showPartialItems = checked }
}
CheckBox {
text: "Partial Sub-cell Items"
checked: false
onCheckedChanged: { root.itemSelection.showPartialSubcellItems = checked }
}
}
}
}
GroupBox {
title: "Render Items"
Column{
Repeater {
model: [ "Opaque:DrawOpaqueDeferred", "Transparent:DrawTransparentDeferred", "Light:DrawLight",
"Opaque Overlays:DrawOverlay3DOpaque", "Transparent Overlays:DrawOverlay3DTransparent" ]
ConfigSlider {
label: qsTr(modelData.split(":")[0])
integral: true
config: Render.getConfig(modelData.split(":")[1])
property: "maxDrawn"
max: config.numDrawn
min: -1
}
}
}
}
}

View file

@ -0,0 +1,21 @@
//
// debugRender.js
// examples/utilities/render
//
// Sam Gateau, created on 3/22/2016.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
// Set up the qml ui
var qml = Script.resolvePath('culling.qml');
var window = new OverlayWindow({
title: 'Render Draws',
source: qml,
width: 300,
height: 200
});
window.setPosition(200, 50);
window.closed.connect(function() { Script.stop(); });

View file

@ -10,6 +10,7 @@
//
import QtQuick 2.5
import QtQuick.Controls 1.4
import "configSlider"
Column {
id: root

View file

@ -0,0 +1,211 @@
//
// PlotPerf.qml
// examples/utilities/render/plotperf
//
// Created by Sam Gateau on 3//2016
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or https://www.apache.org/licenses/LICENSE-2.0.html
//
import QtQuick 2.5
import QtQuick.Controls 1.4
Item {
id: root
width: parent.width
height: 100
// The title of the graph
property string title
// The object used as the default source object for the prop plots
property var object
// This is a hack: a property is assigned to the trigger var in order to get
// a signal called whenever the value changes
property var trigger
// Plots is an array of plot descriptors
// a default plot descriptor expects the following object:
// plots: [ {
// object: {} // Optional: the object from which the prop will be fetched;
// if omitted, the object from root is used
// prop: "bufferCPUCount", // Required: the name of the property on the object that feeds the plot
// label: "CPU", // Optional: label as displayed on the plot
// color: "#00B4EF" // Optional: color of the curve
// unit: "km/h" // Optional: unit appended to the displayed value; if omitted, the default unit is used
// scale: 1 // Optional: extra scaling applied to the value; combined with the global scale
// },
property var plots
// Default value scale used to define the max value of the chart
property var valueScale: 1
// Default value unit appended to the value displayed
property var valueUnit: ""
// Default number of digits displayed
property var valueNumDigits: 0
property var valueMax : 1
property var _values : new Array()
property var tick : 0
function createValues() {
print("trigger is: " + JSON.stringify(trigger))
if (Array.isArray(plots)) {
for (var i =0; i < plots.length; i++) {
var plot = plots[i];
print(" a pnew Plot:" + JSON.stringify(plot));
_values.push( {
object: (plot["object"] !== undefined ? plot["object"] : root.object),
value: plot["prop"],
valueMax: 1,
numSamplesConstantMax: 0,
valueHistory: new Array(),
label: (plot["label"] !== undefined ? plot["label"] : ""),
color: (plot["color"] !== undefined ? plot["color"] : "white"),
scale: (plot["scale"] !== undefined ? plot["scale"] : 1),
unit: (plot["unit"] !== undefined ? plot["unit"] : valueUnit)
})
}
}
print("in creator" + JSON.stringify(_values));
}
Component.onCompleted: {
createValues();
print(JSON.stringify(_values));
}
function pullFreshValues() {
//print("pullFreshValues");
var VALUE_HISTORY_SIZE = 100;
var UPDATE_CANVAS_RATE = 20;
tick++;
var currentValueMax = 0
for (var i = 0; i < _values.length; i++) {
var currentVal = _values[i].object[_values[i].value] * _values[i].scale;
_values[i].valueHistory.push(currentVal)
_values[i].numSamplesConstantMax++;
if (_values[i].valueHistory.length > VALUE_HISTORY_SIZE) {
var lostValue = _values[i].valueHistory.shift();
if (lostValue >= _values[i].valueMax) {
_values[i].valueMax *= 0.99
_values[i].numSamplesConstantMax = 0
}
}
if (_values[i].valueMax < currentVal) {
_values[i].valueMax = currentVal;
_values[i].numSamplesConstantMax = 0
}
if (_values[i].numSamplesConstantMax > VALUE_HISTORY_SIZE) {
_values[i].numSamplesConstantMax = 0
_values[i].valueMax *= 0.95 // slowly lower the current max if no new value has exceeded it for a while
}
if (currentValueMax < _values[i].valueMax) {
currentValueMax = _values[i].valueMax
}
}
if ((valueMax < currentValueMax) || (tick % VALUE_HISTORY_SIZE == 0)) {
valueMax = currentValueMax;
}
if (tick % UPDATE_CANVAS_RATE == 0) {
mycanvas.requestPaint()
}
}
onTriggerChanged: pullFreshValues()
Canvas {
id: mycanvas
anchors.fill:parent
onPaint: {
var lineHeight = 12;
function displayValue(val, unit) {
return (val / root.valueScale).toFixed(root.valueNumDigits) + " " + unit
}
function pixelFromVal(val, valScale) {
return lineHeight + (height - lineHeight) * (1 - (0.9) * val / valueMax);
}
function valueFromPixel(pixY) {
return ((pixY - lineHeight) / (height - lineHeight) - 1) * valueMax / (-0.9);
}
function plotValueHistory(ctx, valHistory, color) {
var widthStep= width / (valHistory.length - 1);
ctx.beginPath();
ctx.strokeStyle = color; // curve color
ctx.lineWidth="2";
ctx.moveTo(0, pixelFromVal(valHistory[0]));
for (var i = 1; i < valHistory.length; i++) {
ctx.lineTo(i * widthStep, pixelFromVal(valHistory[i]));
}
ctx.stroke();
}
function displayValueLegend(ctx, val, num) {
ctx.fillStyle = val.color;
var bestValue = val.valueHistory[val.valueHistory.length -1];
ctx.textAlign = "right";
ctx.fillText(displayValue(bestValue, val.unit), width, (num + 2) * lineHeight * 1.5);
ctx.textAlign = "left";
ctx.fillText(val.label, 0, (num + 2) * lineHeight * 1.5);
}
function displayTitle(ctx, text, maxVal) {
ctx.fillStyle = "grey";
ctx.textAlign = "right";
ctx.fillText(displayValue(valueFromPixel(lineHeight), root.valueUnit), width, lineHeight);
ctx.fillStyle = "white";
ctx.textAlign = "left";
ctx.fillText(text, 0, lineHeight);
}
function displayBackground(ctx) {
ctx.fillStyle = Qt.rgba(0, 0, 0, 0.6);
ctx.fillRect(0, 0, width, height);
ctx.strokeStyle= "grey";
ctx.lineWidth="2";
ctx.beginPath();
ctx.moveTo(0, lineHeight + 1);
ctx.lineTo(width, lineHeight + 1);
ctx.moveTo(0, height);
ctx.lineTo(width, height);
ctx.stroke();
}
var ctx = getContext("2d");
ctx.clearRect(0, 0, width, height);
ctx.font="12px Verdana";
displayBackground(ctx);
for (var i = 0; i < _values.length; i++) {
plotValueHistory(ctx, _values[i].valueHistory, _values[i].color)
displayValueLegend(ctx, _values[i], i)
}
displayTitle(ctx, title, valueMax)
}
}
}

View file

@ -0,0 +1 @@
PlotPerf 1.0 PlotPerf.qml

View file

@ -0,0 +1,21 @@
//
// renderStats.js
// examples/utilities/tools/render
//
// Sam Gateau, created on 3/22/2016.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
// Set up the qml ui
var qml = Script.resolvePath('stats.qml');
var window = new OverlayWindow({
title: 'Render Stats',
source: qml,
width: 300,
height: 200
});
window.setPosition(500, 50);
window.closed.connect(function() { Script.stop(); });

View file

@ -0,0 +1,195 @@
//
// stats.qml
// examples/utilities/render
//
// Created by Zach Pomerantz on 2/8/2016
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or https://www.apache.org/licenses/LICENSE-2.0.html
//
import QtQuick 2.5
import QtQuick.Controls 1.4
import "plotperf"
Item {
id: statsUI
anchors.fill:parent
Column {
id: stats
spacing: 8
anchors.fill:parent
property var config: Render.getConfig("Stats")
function evalEvenHeight() {
// Why do we have to do this manually? There doesn't seem to be a QML anchor/layout mode that does it.
return (height - spacing * (children.length - 1)) / children.length
}
PlotPerf {
title: "Num Buffers"
height: parent.evalEvenHeight()
object: stats.config
trigger: stats.config["bufferCPUCount"]
plots: [
{
prop: "bufferCPUCount",
label: "CPU",
color: "#00B4EF"
},
{
prop: "bufferGPUCount",
label: "GPU",
color: "#1AC567"
}
]
}
PlotPerf {
title: "gpu::Buffer Memory"
height: parent.evalEvenHeight()
object: stats.config
trigger: stats.config["bufferCPUMemoryUsage"]
valueScale: 1048576
valueUnit: "Mb"
valueNumDigits: "1"
plots: [
{
prop: "bufferCPUMemoryUsage",
label: "CPU",
color: "#00B4EF"
},
{
prop: "bufferGPUMemoryUsage",
label: "GPU",
color: "#1AC567"
}
]
}
PlotPerf {
title: "Num Textures"
height: parent.evalEvenHeight()
object: stats.config
trigger: stats.config["textureCPUCount"]
plots: [
{
prop: "textureCPUCount",
label: "CPU",
color: "#00B4EF"
},
{
prop: "textureGPUCount",
label: "GPU",
color: "#1AC567"
},
{
prop: "frameTextureCount",
label: "Frame",
color: "#E2334D"
}
]
}
PlotPerf {
title: "gpu::Texture Memory"
height: parent.evalEvenHeight()
object: stats.config
trigger: stats.config["textureCPUMemoryUsage"]
valueScale: 1048576
valueUnit: "Mb"
valueNumDigits: "1"
plots: [
{
prop: "textureCPUMemoryUsage",
label: "CPU",
color: "#00B4EF"
},
{
prop: "textureGPUMemoryUsage",
label: "GPU",
color: "#1AC567"
}
]
}
PlotPerf {
title: "Triangles"
height: parent.evalEvenHeight()
object: stats.config
trigger: stats.config["frameTriangleCount"]
valueScale: 1000
valueUnit: "K"
plots: [
{
prop: "frameTriangleCount",
label: "Triangles",
color: "#1AC567"
},
{
prop: "frameTriangleRate",
label: "rate",
color: "#E2334D",
scale: 0.001,
unit: "MT/s"
}
]
}
PlotPerf {
title: "Drawcalls"
height: parent.evalEvenHeight()
object: stats.config
trigger: stats.config["frameDrawcallCount"]
plots: [
{
prop: "frameAPIDrawcallCount",
label: "API Drawcalls",
color: "#00B4EF"
},
{
prop: "frameDrawcallCount",
label: "GPU Drawcalls",
color: "#1AC567"
},
{
prop: "frameDrawcallRate",
label: "rate",
color: "#E2334D",
scale: 0.001,
unit: "K/s"
}
]
}
property var drawOpaqueConfig: Render.getConfig("DrawOpaqueDeferred")
property var drawTransparentConfig: Render.getConfig("DrawTransparentDeferred")
property var drawLightConfig: Render.getConfig("DrawLight")
PlotPerf {
title: "Items"
height: parent.evalEvenHeight()
object: parent.drawOpaqueConfig
trigger: Render.getConfig("DrawOpaqueDeferred")["numDrawn"]
plots: [
{
object: Render.getConfig("DrawOpaqueDeferred"),
prop: "numDrawn",
label: "Opaques",
color: "#1AC567"
},
{
object: Render.getConfig("DrawTransparentDeferred"),
prop: "numDrawn",
label: "Translucents",
color: "#00B4EF"
},
{
object: Render.getConfig("DrawLight"),
prop: "numDrawn",
label: "Lights",
color: "#E2334D"
}
]
}
}
}

View file

@ -1,99 +0,0 @@
//
// debugRenderOctree.js
// examples/utilities/tools
//
// Sam Gateau
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
Script.include("cookies.js");
var panel = new Panel(10, 300);
var drawOctree = Render.RenderDeferredTask.DrawSceneOctree;
Render.RenderDeferredTask.DrawSceneOctree.enabled = true;
Render.RenderDeferredTask.DrawItemSelection.enabled = true;
panel.newCheckbox("Show Octree Cells",
function(value) { Render.RenderDeferredTask.DrawSceneOctree.showVisibleCells = value; },
function() { return (Render.RenderDeferredTask.DrawSceneOctree.showVisibleCells); },
function(value) { return (value); }
);
panel.newCheckbox("Show Empty Cells",
function(value) { Render.RenderDeferredTask.DrawSceneOctree.showEmptyCells = value; },
function() { return (Render.RenderDeferredTask.DrawSceneOctree.showEmptyCells); },
function(value) { return (value); }
);
panel.newCheckbox("Freeze Frustum",
function(value) { Render.RenderDeferredTask.FetchSceneSelection.freezeFrustum = value; Render.RenderDeferredTask.CullSceneSelection.freezeFrustum = value; },
function() { return (Render.RenderDeferredTask.FetchSceneSelection.freezeFrustum); },
function(value) { return (value); }
);
panel.newCheckbox("Show Inside Items",
function(value) { Render.RenderDeferredTask.DrawItemSelection.showInsideItems = value; },
function() { return (Render.RenderDeferredTask.DrawItemSelection.showInsideItems); },
function(value) { return (value); }
);
panel.newCheckbox("Show Inside Subcell Items",
function(value) { Render.RenderDeferredTask.DrawItemSelection.showInsideSubcellItems = value; },
function() { return (Render.RenderDeferredTask.DrawItemSelection.showInsideSubcellItems); },
function(value) { return (value); }
);
panel.newCheckbox("Show Partial Items",
function(value) { Render.RenderDeferredTask.DrawItemSelection.showPartialItems = value; },
function() { return (Render.RenderDeferredTask.DrawItemSelection.showPartialItems); },
function(value) { return (value); }
);
panel.newCheckbox("Show Partial Subcell Items",
function(value) { Render.RenderDeferredTask.DrawItemSelection.showPartialSubcellItems = value; },
function() { return (Render.RenderDeferredTask.DrawItemSelection.showPartialSubcellItems); },
function(value) { return (value); }
);
/*
panel.newSlider('Cells Free / Allocated', -1, 1,
function(value) { value; }, // setter
function() { return Render.RenderDeferredTask.DrawSceneOctree.numFreeCells; }, // getter
function(value) { return value; });
this.update = function () {
var numFree = Render.RenderDeferredTask.DrawSceneOctree.numFreeCells;
var numAlloc = Render.RenderDeferredTask.DrawSceneOctree.numAllocatedCells;
var title = [
' ' + name,
numFree + ' / ' + numAlloc
].join('\t');
widget.editTitle({ text: title });
slider.setMaxValue(numAlloc);
};
*/
function mouseMoveEvent(event) {
panel.mouseMoveEvent(event);
}
function mousePressEvent(event) {
panel.mousePressEvent(event);
}
function mouseReleaseEvent(event) {
panel.mouseReleaseEvent(event);
}
Controller.mouseMoveEvent.connect(mouseMoveEvent);
Controller.mousePressEvent.connect(mousePressEvent);
Controller.mouseReleaseEvent.connect(mouseReleaseEvent);
function scriptEnding() {
panel.destroy();
Render.RenderDeferredTask.DrawSceneOctree.enabled = false;
Render.RenderDeferredTask.DrawItemSelection.enabled = false;
}
Script.scriptEnding.connect(scriptEnding);

View file

@ -11,7 +11,6 @@
#include "IceServer.h"
#include <openssl/rsa.h>
#include <openssl/x509.h>
#include <QtCore/QJsonDocument>
@ -68,7 +67,9 @@ bool IceServer::packetVersionMatch(const udt::Packet& packet) {
}
void IceServer::processPacket(std::unique_ptr<udt::Packet> packet) {
_lastPacketTimestamp = QDateTime::currentMSecsSinceEpoch();
auto nlPacket = NLPacket::fromBase(std::move(packet));
// make sure that this packet at least looks like something we can read
@ -161,15 +162,12 @@ SharedNetworkPeer IceServer::addOrUpdateHeartbeatingPeer(NLPacket& packet) {
}
bool IceServer::isVerifiedHeartbeat(const QUuid& domainID, const QByteArray& plaintext, const QByteArray& signature) {
// check if we have a private key for this domain ID - if we do not then fire off the request for it
// check if we have a public key for this domain ID - if we do not then fire off the request for it
auto it = _domainPublicKeys.find(domainID);
if (it != _domainPublicKeys.end()) {
// attempt to verify the signature for this heartbeat
const unsigned char* publicKeyData = reinterpret_cast<const unsigned char*>(it->second.constData());
// first load up the public key into an RSA struct
RSA* rsaPublicKey = d2i_RSA_PUBKEY(NULL, &publicKeyData, it->second.size());
const auto rsaPublicKey = it->second.get();
if (rsaPublicKey) {
auto hashedPlaintext = QCryptographicHash::hash(plaintext, QCryptographicHash::Sha256);
@ -180,9 +178,6 @@ bool IceServer::isVerifiedHeartbeat(const QUuid& domainID, const QByteArray& pla
signature.size(),
rsaPublicKey);
// free up the public key and remove connection token before we return
RSA_free(rsaPublicKey);
if (verificationResult == 1) {
// this is the only success case - we return true here to indicate that the heartbeat is verified
return true;
@ -192,7 +187,7 @@ bool IceServer::isVerifiedHeartbeat(const QUuid& domainID, const QByteArray& pla
} else {
// we can't let this user in since we couldn't convert their public key to an RSA key we could use
qWarning() << "Could not convert in-memory public key for" << domainID << "to usable RSA public key.";
qWarning() << "Public key for" << domainID << "is not a usable RSA* public key.";
qWarning() << "Re-requesting public key from API";
}
}
@ -240,7 +235,22 @@ void IceServer::publicKeyReplyFinished(QNetworkReply* reply) {
if (responseObject[STATUS_KEY].toString() == SUCCESS_VALUE) {
auto dataObject = responseObject[DATA_KEY].toObject();
if (dataObject.contains(PUBLIC_KEY_KEY)) {
_domainPublicKeys[domainID] = QByteArray::fromBase64(dataObject[PUBLIC_KEY_KEY].toString().toUtf8());
// grab the base 64 public key from the API response
auto apiPublicKey = QByteArray::fromBase64(dataObject[PUBLIC_KEY_KEY].toString().toUtf8());
// convert the downloaded public key to an RSA struct, if possible
const unsigned char* publicKeyData = reinterpret_cast<const unsigned char*>(apiPublicKey.constData());
RSA* rsaPublicKey = d2i_RSA_PUBKEY(NULL, &publicKeyData, apiPublicKey.size());
if (rsaPublicKey) {
_domainPublicKeys[domainID] = { rsaPublicKey, RSA_free };
} else {
qWarning() << "Could not convert in-memory public key for" << domainID << "to usable RSA public key.";
qWarning() << "Public key will be re-requested on next heartbeat.";
}
} else {
qWarning() << "There was no public key present in response for domain with ID" << domainID;
}
@ -254,6 +264,8 @@ void IceServer::publicKeyReplyFinished(QNetworkReply* reply) {
qWarning() << "Error retreiving public key for domain with ID" << domainID << "-" << reply->errorString();
}
reply->deleteLater();
}
void IceServer::sendPeerInformationPacket(const NetworkPeer& peer, const HifiSockAddr* destinationSockAddr) {
@ -274,6 +286,11 @@ void IceServer::clearInactivePeers() {
if ((usecTimestampNow() - peer->getLastHeardMicrostamp()) > (PEER_SILENCE_THRESHOLD_MSECS * 1000)) {
qDebug() << "Removing peer from memory for inactivity -" << *peer;
// if we had a public key for this domain, remove it now
_domainPublicKeys.erase(peer->getUUID());
// remove the peer object
peerItem = _activePeers.erase(peerItem);
} else {
// we didn't kill this peer, push the iterator forwards
@ -288,7 +305,14 @@ bool IceServer::handleHTTPRequest(HTTPConnection* connection, const QUrl& url, b
if (connection->requestOperation() == QNetworkAccessManager::GetOperation) {
if (url.path() == "/status") {
connection->respond(HTTPConnection::StatusCode200, QByteArray::number(_activePeers.size()));
// figure out if we respond with 0 (we're good) or 1 (we think we're in trouble)
const quint64 MAX_PACKET_GAP_MS_FOR_STUCK_SOCKET = 10 * 1000;
int statusNumber = (QDateTime::currentMSecsSinceEpoch() - _lastPacketTimestamp > MAX_PACKET_GAP_MS_FOR_STUCK_SOCKET)
? 1 : 0;
connection->respond(HTTPConnection::StatusCode200, QByteArray::number(statusNumber));
}
}
return true;

View file

@ -16,6 +16,8 @@
#include <QtCore/QSharedPointer>
#include <QUdpSocket>
#include <openssl/rsa.h>
#include <UUIDHasher.h>
#include <NetworkPeer.h>
@ -52,8 +54,11 @@ private:
HTTPManager _httpManager;
using DomainPublicKeyHash = std::unordered_map<QUuid, QByteArray>;
using RSAUniquePtr = std::unique_ptr<RSA, std::function<void(RSA*)>>;
using DomainPublicKeyHash = std::unordered_map<QUuid, RSAUniquePtr>;
DomainPublicKeyHash _domainPublicKeys;
quint64 _lastPacketTimestamp;
};
#endif // hifi_IceServer_h

View file

@ -1,6 +1,7 @@
import QtQuick 2.3
import QtQuick.Controls 1.2
import QtWebEngine 1.1
import QtWebChannel 1.0
import "windows" as Windows
import "controls" as Controls
@ -15,11 +16,24 @@ Windows.Window {
// Don't destroy on close... otherwise the JS/C++ will have a dangling pointer
destroyOnCloseButton: false
property alias source: webview.url
property alias eventBridge: eventBridgeWrapper.eventBridge;
QtObject {
id: eventBridgeWrapper
WebChannel.id: "eventBridgeWrapper"
property var eventBridge;
}
// This is for JS/QML communication, which is unused in a WebWindow,
// but not having this here results in spurious warnings about a
// missing signal
signal sendToScript(var message);
Controls.WebView {
id: webview
url: "about:blank"
anchors.fill: parent
focus: true
webChannel.registeredObjects: [eventBridgeWrapper]
}
} // dialog

View file

@ -20,6 +20,7 @@ Windows.Window {
// Don't destroy on close... otherwise the JS/C++ will have a dangling pointer
destroyOnCloseButton: false
property var source;
property var eventBridge;
property var component;
property var dynamicContent;
onSourceChanged: {

View file

@ -1,7 +1,7 @@
import QtQuick 2.5
import QtQuick.Controls 1.4
import QtWebEngine 1.1
import QtWebChannel 1.0
import Qt.labs.settings 1.0
import "windows" as Windows
@ -37,14 +37,26 @@ Windows.Window {
Repeater {
model: 4
Tab {
// Force loading of the content even if the tab is not visible
// (required for letting the C++ code access the webview)
active: true
enabled: false;
// we need to store the original url here for future identification
enabled: false
property string originalUrl: "";
onEnabledChanged: toolWindow.updateVisiblity();
Controls.WebView {
id: webView;
anchors.fill: parent
enabled: false
property alias eventBridgeWrapper: eventBridgeWrapper
QtObject {
id: eventBridgeWrapper
WebChannel.id: "eventBridgeWrapper"
property var eventBridge;
}
webChannel.registeredObjects: [eventBridgeWrapper]
onEnabledChanged: toolWindow.updateVisiblity();
}
}
}
@ -113,20 +125,23 @@ Windows.Window {
var tab = tabView.getTab(index);
tab.title = "";
tab.originalUrl = "";
tab.enabled = false;
tab.originalUrl = "";
tab.item.url = "about:blank";
tab.item.enabled = false;
}
function addWebTab(properties) {
if (!properties.source) {
console.warn("Attempted to open Web Tool Pane without URL")
console.warn("Attempted to open Web Tool Pane without URL");
return;
}
var existingTabIndex = findIndexForUrl(properties.source);
if (existingTabIndex >= 0) {
console.log("Existing tab " + existingTabIndex + " found with URL " + properties.source)
return tabView.getTab(existingTabIndex);
console.log("Existing tab " + existingTabIndex + " found with URL " + properties.source);
var tab = tabView.getTab(existingTabIndex);
return tab.item;
}
var freeTabIndex = findFreeTab();
@ -135,25 +150,28 @@ Windows.Window {
return;
}
var newTab = tabView.getTab(freeTabIndex);
newTab.title = properties.title || "Unknown";
newTab.originalUrl = properties.source;
newTab.item.url = properties.source;
newTab.active = true;
if (properties.width) {
tabView.width = Math.min(Math.max(tabView.width, properties.width),
toolWindow.maxSize.x);
tabView.width = Math.min(Math.max(tabView.width, properties.width), toolWindow.maxSize.x);
}
if (properties.height) {
tabView.height = Math.min(Math.max(tabView.height, properties.height),
toolWindow.maxSize.y);
tabView.height = Math.min(Math.max(tabView.height, properties.height), toolWindow.maxSize.y);
}
console.log("Updating visibility based on child tab added");
newTab.enabledChanged.connect(updateVisiblity)
updateVisiblity();
return newTab
var tab = tabView.getTab(freeTabIndex);
tab.title = properties.title || "Unknown";
tab.enabled = true;
console.log("New tab URL: " + properties.source)
tab.originalUrl = properties.source;
var eventBridge = properties.eventBridge;
console.log("Event bridge: " + eventBridge);
var result = tab.item;
result.enabled = true;
console.log("Setting event bridge: " + eventBridge);
result.eventBridgeWrapper.eventBridge = eventBridge;
result.url = properties.source;
return result;
}
}

View file

@ -59,6 +59,7 @@ WebEngineView {
request.openIn(newWindow.webView)
}
profile: desktop.browserProfile
// This breaks the webchannel used for passing messages. Fixed in Qt 5.6
// See https://bugreports.qt.io/browse/QTBUG-49521
//profile: desktop.browserProfile
}

View file

@ -224,7 +224,6 @@ static const QString DESKTOP_LOCATION = QStandardPaths::writableLocation(QStanda
static const QString DESKTOP_LOCATION = QStandardPaths::writableLocation(QStandardPaths::DesktopLocation).append("/script.js");
#endif
const QString DEFAULT_SCRIPTS_JS_URL = "http://s3.amazonaws.com/hifi-public/scripts/defaultScripts.js";
Setting::Handle<int> maxOctreePacketsPerSecond("maxOctreePPS", DEFAULT_MAX_OCTREE_PPS);
const QHash<QString, Application::AcceptURLMethod> Application::_acceptedExtensions {
@ -270,10 +269,10 @@ public:
void run() override {
while (!_quit) {
QThread::sleep(HEARTBEAT_UPDATE_INTERVAL_SECS);
auto now = usecTimestampNow();
// in the unlikely event that now is less than _heartbeat, don't rollover and confuse ourselves
auto lastHeartbeatAge = (now > _heartbeat) ? now - _heartbeat : 0;
uint64_t lastHeartbeat = _heartbeat; // sample atomic _heartbeat, because we could context switch away and have it updated on us
uint64_t now = usecTimestampNow();
auto lastHeartbeatAge = (now > lastHeartbeat) ? now - lastHeartbeat : 0;
auto sinceLastReport = (now > _lastReport) ? now - _lastReport : 0;
auto elapsedMovingAverage = _movingAverage.getAverage();
@ -310,7 +309,7 @@ public:
if (lastHeartbeatAge > MAX_HEARTBEAT_AGE_USECS) {
qDebug() << "DEADLOCK DETECTED -- "
<< "lastHeartbeatAge:" << lastHeartbeatAge
<< "[ _heartbeat:" << _heartbeat
<< "[ lastHeartbeat :" << lastHeartbeat
<< "now:" << now << " ]"
<< "elapsedMovingAverage:" << elapsedMovingAverage
<< "maxElapsed:" << _maxElapsed
@ -598,8 +597,18 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer) :
audioThread->setObjectName("Audio Thread");
auto audioIO = DependencyManager::get<AudioClient>();
audioIO->setPositionGetter([this]{ return getMyAvatar()->getPositionForAudio(); });
audioIO->setOrientationGetter([this]{ return getMyAvatar()->getOrientationForAudio(); });
audioIO->setPositionGetter([]{
auto avatarManager = DependencyManager::get<AvatarManager>();
auto myAvatar = avatarManager ? avatarManager->getMyAvatar() : nullptr;
return myAvatar ? myAvatar->getPositionForAudio() : Vectors::ZERO;
});
audioIO->setOrientationGetter([]{
auto avatarManager = DependencyManager::get<AvatarManager>();
auto myAvatar = avatarManager ? avatarManager->getMyAvatar() : nullptr;
return myAvatar ? myAvatar->getOrientationForAudio() : Quaternions::IDENTITY;
});
audioIO->moveToThread(audioThread);
recording::Frame::registerFrameHandler(AudioConstants::getAudioFrameName(), [=](recording::Frame::ConstPointer frame) {
@ -2790,43 +2799,50 @@ void Application::calibrateEyeTracker5Points() {
}
#endif
bool Application::exportEntities(const QString& filename, const QVector<EntityItemID>& entityIDs) {
QVector<EntityItemPointer> entities;
bool Application::exportEntities(const QString& filename, const QVector<EntityItemID>& entityIDs, const glm::vec3* givenOffset) {
QHash<EntityItemID, EntityItemPointer> entities;
auto entityTree = getEntities()->getTree();
auto exportTree = std::make_shared<EntityTree>();
exportTree->createRootElement();
glm::vec3 root(TREE_SCALE, TREE_SCALE, TREE_SCALE);
for (auto entityID : entityIDs) {
for (auto entityID : entityIDs) { // Gather entities and properties.
auto entityItem = entityTree->findEntityByEntityItemID(entityID);
if (!entityItem) {
qCWarning(interfaceapp) << "Skipping export of" << entityID << "that is not in scene.";
continue;
}
auto properties = entityItem->getProperties();
auto position = properties.getPosition();
root.x = glm::min(root.x, position.x);
root.y = glm::min(root.y, position.y);
root.z = glm::min(root.z, position.z);
entities << entityItem;
if (!givenOffset) {
EntityItemID parentID = entityItem->getParentID();
if (parentID.isInvalidID() || !entityIDs.contains(parentID) || !entityTree->findEntityByEntityItemID(parentID)) {
auto position = entityItem->getPosition(); // If parent wasn't selected, we want absolute position, which isn't in properties.
root.x = glm::min(root.x, position.x);
root.y = glm::min(root.y, position.y);
root.z = glm::min(root.z, position.z);
}
}
entities[entityID] = entityItem;
}
if (entities.size() == 0) {
return false;
}
for (auto entityItem : entities) {
auto properties = entityItem->getProperties();
properties.setPosition(properties.getPosition() - root);
exportTree->addEntity(entityItem->getEntityItemID(), properties);
if (givenOffset) {
root = *givenOffset;
}
for (EntityItemPointer& entityDatum : entities) {
auto properties = entityDatum->getProperties();
EntityItemID parentID = properties.getParentID();
if (parentID.isInvalidID()) {
properties.setPosition(properties.getPosition() - root);
} else if (!entities.contains(parentID)) {
entityDatum->globalizeProperties(properties, "Parent %3 of %2 %1 is not selected for export.", -root);
} // else valid parent -- don't offset
exportTree->addEntity(entityDatum->getEntityItemID(), properties);
}
// remap IDs on export so that we aren't publishing the IDs of entities in our domain
exportTree->remapIDs();
exportTree->writeToJSONFile(filename.toLocal8Bit().constData());
@ -2836,33 +2852,14 @@ bool Application::exportEntities(const QString& filename, const QVector<EntityIt
}
bool Application::exportEntities(const QString& filename, float x, float y, float z, float scale) {
glm::vec3 offset(x, y, z);
QVector<EntityItemPointer> entities;
getEntities()->getTree()->findEntities(AACube(glm::vec3(x, y, z), scale), entities);
if (entities.size() > 0) {
glm::vec3 root(x, y, z);
auto exportTree = std::make_shared<EntityTree>();
exportTree->createRootElement();
for (int i = 0; i < entities.size(); i++) {
EntityItemProperties properties = entities.at(i)->getProperties();
EntityItemID id = entities.at(i)->getEntityItemID();
properties.setPosition(properties.getPosition() - root);
exportTree->addEntity(id, properties);
}
// remap IDs on export so that we aren't publishing the IDs of entities in our domain
exportTree->remapIDs();
exportTree->writeToSVOFile(filename.toLocal8Bit().constData());
} else {
qCDebug(interfaceapp) << "No models were selected";
return false;
QVector<EntityItemID> ids;
getEntities()->getTree()->findEntities(AACube(offset, scale), entities);
foreach(EntityItemPointer entity, entities) {
ids << entity->getEntityItemID();
}
// restore the main window's active state
_window->activateWindow();
return true;
return exportEntities(filename, ids, &offset);
}
void Application::loadSettings() {
@ -2893,16 +2890,8 @@ void Application::saveSettings() {
bool Application::importEntities(const QString& urlOrFilename) {
_entityClipboard->eraseAllOctreeElements();
QUrl url(urlOrFilename);
// if the URL appears to be invalid or relative, then it is probably a local file
if (!url.isValid() || url.isRelative()) {
url = QUrl::fromLocalFile(urlOrFilename);
}
bool success = _entityClipboard->readFromURL(url.toString());
bool success = _entityClipboard->readFromURL(urlOrFilename);
if (success) {
_entityClipboard->remapIDs();
_entityClipboard->reaverageOctreeElements();
}
return success;
@ -2985,6 +2974,11 @@ void Application::updateLOD() {
}
}
void Application::pushPreRenderLambda(void* key, std::function<void()> func) {
std::unique_lock<std::mutex> guard(_preRenderLambdasLock);
_preRenderLambdas[key] = func;
}
// Called during Application::update immediately before AvatarManager::updateMyAvatar, updating my data that is then sent to everyone.
// (Maybe this code should be moved there?)
// The principal result is to call updateLookAtTargetAvatar() and then setLookAtPosition().
@ -3461,6 +3455,16 @@ void Application::update(float deltaTime) {
QMetaObject::invokeMethod(DependencyManager::get<AudioClient>().data(), "sendDownstreamAudioStatsPacket", Qt::QueuedConnection);
}
}
{
PROFILE_RANGE_EX("PreRenderLambdas", 0xffff0000, (uint64_t)0);
std::unique_lock<std::mutex> guard(_preRenderLambdasLock);
for (auto& iter : _preRenderLambdas) {
iter.second();
}
_preRenderLambdas.clear();
}
}
@ -4886,13 +4890,39 @@ void Application::updateDisplayMode() {
{
std::unique_lock<std::mutex> lock(_displayPluginLock);
auto oldDisplayPlugin = _displayPlugin;
if (_displayPlugin) {
_displayPlugin->deactivate();
}
// FIXME probably excessive and useless context switching
_offscreenContext->makeCurrent();
newDisplayPlugin->activate();
bool active = newDisplayPlugin->activate();
if (!active) {
// If the new plugin fails to activate, fall back to the last display
qWarning() << "Failed to activate display: " << newDisplayPlugin->getName();
newDisplayPlugin = oldDisplayPlugin;
if (newDisplayPlugin) {
qWarning() << "Falling back to last display: " << newDisplayPlugin->getName();
active = newDisplayPlugin->activate();
}
// If there is no last display, or
// if the last display fails to activate, fall back to desktop
if (!active) {
newDisplayPlugin = displayPlugins.at(0);
qWarning() << "Falling back to display: " << newDisplayPlugin->getName();
active = newDisplayPlugin->activate();
}
if (!active) {
qFatal("Failed to activate fallback plugin");
}
}
_offscreenContext->makeCurrent();
offscreenUi->resize(fromGlm(newDisplayPlugin->getRecommendedUiSize()));
_offscreenContext->makeCurrent();

View file

@ -211,6 +211,8 @@ public:
render::EnginePointer getRenderEngine() override { return _renderEngine; }
gpu::ContextPointer getGPUContext() const { return _gpuContext; }
virtual void pushPreRenderLambda(void* key, std::function<void()> func) override;
const QRect& getMirrorViewRect() const { return _mirrorViewRect; }
void updateMyAvatarLookAtPosition();
@ -233,7 +235,7 @@ signals:
public slots:
QVector<EntityItemID> pasteEntities(float x, float y, float z);
bool exportEntities(const QString& filename, const QVector<EntityItemID>& entityIDs);
bool exportEntities(const QString& filename, const QVector<EntityItemID>& entityIDs, const glm::vec3* givenOffset = nullptr);
bool exportEntities(const QString& filename, float x, float y, float z, float scale);
bool importEntities(const QString& url);
@ -510,6 +512,9 @@ private:
bool _cursorNeedsChanging { false };
QThread* _deadlockWatchdogThread;
std::map<void*, std::function<void()>> _preRenderLambdas;
std::mutex _preRenderLambdasLock;
};
#endif // hifi_Application_h

View file

@ -16,7 +16,7 @@
#include "InterfaceParentFinder.h"
SpatiallyNestableWeakPointer InterfaceParentFinder::find(QUuid parentID, bool& success) const {
SpatiallyNestableWeakPointer InterfaceParentFinder::find(QUuid parentID, bool& success, SpatialParentTree* entityTree) const {
SpatiallyNestableWeakPointer parent;
if (parentID.isNull()) {
@ -25,9 +25,13 @@ SpatiallyNestableWeakPointer InterfaceParentFinder::find(QUuid parentID, bool& s
}
// search entities
EntityTreeRenderer* treeRenderer = qApp->getEntities();
EntityTreePointer tree = treeRenderer ? treeRenderer->getTree() : nullptr;
parent = tree ? tree->findEntityByEntityItemID(parentID) : nullptr;
if (entityTree) {
parent = entityTree->findByID(parentID);
} else {
EntityTreeRenderer* treeRenderer = qApp->getEntities();
EntityTreePointer tree = treeRenderer ? treeRenderer->getTree() : nullptr;
parent = tree ? tree->findEntityByEntityItemID(parentID) : nullptr;
}
if (!parent.expired()) {
success = true;
return parent;

View file

@ -21,7 +21,7 @@ class InterfaceParentFinder : public SpatialParentFinder {
public:
InterfaceParentFinder() { }
virtual ~InterfaceParentFinder() { }
virtual SpatiallyNestableWeakPointer find(QUuid parentID, bool& success) const;
virtual SpatiallyNestableWeakPointer find(QUuid parentID, bool& success, SpatialParentTree* entityTree = nullptr) const;
};
#endif // hifi_InterfaceParentFinder_h

View file

@ -466,8 +466,8 @@ void Avatar::render(RenderArgs* renderArgs, const glm::vec3& cameraPosition) {
* (1.0f - ((float)(now - getHead()->getLookingAtMeStarted()))
/ (LOOKING_AT_ME_DURATION * (float)USECS_PER_SECOND));
if (alpha > 0.0f) {
QSharedPointer<NetworkGeometry> geometry = _skeletonModel->getGeometry();
if (geometry && geometry->isLoaded()) {
if (_skeletonModel->isLoaded()) {
const auto& geometry = _skeletonModel->getFBXGeometry();
const float DEFAULT_EYE_DIAMETER = 0.048f; // Typical human eye
const float RADIUS_INCREMENT = 0.005f;
batch.setModelTransform(Transform());
@ -475,7 +475,7 @@ void Avatar::render(RenderArgs* renderArgs, const glm::vec3& cameraPosition) {
glm::vec3 position = getHead()->getLeftEyePosition();
Transform transform;
transform.setTranslation(position);
float eyeDiameter = geometry->getFBXGeometry().leftEyeSize;
float eyeDiameter = geometry.leftEyeSize;
if (eyeDiameter == 0.0f) {
eyeDiameter = DEFAULT_EYE_DIAMETER;
}
@ -486,7 +486,7 @@ void Avatar::render(RenderArgs* renderArgs, const glm::vec3& cameraPosition) {
position = getHead()->getRightEyePosition();
transform.setTranslation(position);
eyeDiameter = geometry->getFBXGeometry().rightEyeSize;
eyeDiameter = geometry.rightEyeSize;
if (eyeDiameter == 0.0f) {
eyeDiameter = DEFAULT_EYE_DIAMETER;
}
@ -815,7 +815,7 @@ int Avatar::getJointIndex(const QString& name) const {
Q_RETURN_ARG(int, result), Q_ARG(const QString&, name));
return result;
}
return _skeletonModel->isActive() ? _skeletonModel->getGeometry()->getFBXGeometry().getJointIndex(name) : -1;
return _skeletonModel->isActive() ? _skeletonModel->getFBXGeometry().getJointIndex(name) : -1;
}
QStringList Avatar::getJointNames() const {
@ -825,7 +825,7 @@ QStringList Avatar::getJointNames() const {
Q_RETURN_ARG(QStringList, result));
return result;
}
return _skeletonModel->isActive() ? _skeletonModel->getGeometry()->getFBXGeometry().getJointNames() : QStringList();
return _skeletonModel->isActive() ? _skeletonModel->getFBXGeometry().getJointNames() : QStringList();
}
glm::vec3 Avatar::getJointPosition(int index) const {

View file

@ -373,32 +373,34 @@ void MyAvatar::simulate(float deltaTime) {
EntityTreeRenderer* entityTreeRenderer = qApp->getEntities();
EntityTreePointer entityTree = entityTreeRenderer ? entityTreeRenderer->getTree() : nullptr;
if (entityTree) {
auto now = usecTimestampNow();
EntityEditPacketSender* packetSender = qApp->getEntityEditPacketSender();
MovingEntitiesOperator moveOperator(entityTree);
forEachDescendant([&](SpatiallyNestablePointer object) {
// if the queryBox has changed, tell the entity-server
if (object->computePuffedQueryAACube() && object->getNestableType() == NestableType::Entity) {
EntityItemPointer entity = std::static_pointer_cast<EntityItem>(object);
bool success;
AACube newCube = entity->getQueryAACube(success);
if (success) {
moveOperator.addEntityToMoveList(entity, newCube);
}
if (packetSender) {
EntityItemProperties properties = entity->getProperties();
properties.setQueryAACubeDirty();
properties.setLastEdited(now);
packetSender->queueEditEntityMessage(PacketType::EntityEdit, entity->getID(), properties);
entity->setLastBroadcast(usecTimestampNow());
entityTree->withWriteLock([&] {
auto now = usecTimestampNow();
EntityEditPacketSender* packetSender = qApp->getEntityEditPacketSender();
MovingEntitiesOperator moveOperator(entityTree);
forEachDescendant([&](SpatiallyNestablePointer object) {
// if the queryBox has changed, tell the entity-server
if (object->computePuffedQueryAACube() && object->getNestableType() == NestableType::Entity) {
EntityItemPointer entity = std::static_pointer_cast<EntityItem>(object);
bool success;
AACube newCube = entity->getQueryAACube(success);
if (success) {
moveOperator.addEntityToMoveList(entity, newCube);
}
if (packetSender) {
EntityItemProperties properties = entity->getProperties();
properties.setQueryAACubeDirty();
properties.setLastEdited(now);
packetSender->queueEditEntityMessage(PacketType::EntityEdit, entity->getID(), properties);
entity->setLastBroadcast(usecTimestampNow());
}
}
});
// also update the position of children in our local octree
if (moveOperator.hasMovingEntities()) {
PerformanceTimer perfTimer("recurseTreeWithOperator");
entityTree->recurseTreeWithOperator(&moveOperator);
}
});
// also update the position of children in our local octree
if (moveOperator.hasMovingEntities()) {
PerformanceTimer perfTimer("recurseTreeWithOperator");
entityTree->recurseTreeWithOperator(&moveOperator);
}
}
}
@ -1272,8 +1274,8 @@ void MyAvatar::setVisibleInSceneIfReady(Model* model, render::ScenePointer scene
void MyAvatar::initHeadBones() {
int neckJointIndex = -1;
if (_skeletonModel->getGeometry()) {
neckJointIndex = _skeletonModel->getGeometry()->getFBXGeometry().neckJointIndex;
if (_skeletonModel->isLoaded()) {
neckJointIndex = _skeletonModel->getFBXGeometry().neckJointIndex;
}
if (neckJointIndex == -1) {
return;

View file

@ -39,12 +39,12 @@ SkeletonModel::~SkeletonModel() {
}
void SkeletonModel::initJointStates() {
const FBXGeometry& geometry = _geometry->getFBXGeometry();
const FBXGeometry& geometry = getFBXGeometry();
glm::mat4 modelOffset = glm::scale(_scale) * glm::translate(_offset);
_rig->initJointStates(geometry, modelOffset);
// Determine the default eye position for avatar scale = 1.0
int headJointIndex = _geometry->getFBXGeometry().headJointIndex;
int headJointIndex = geometry.headJointIndex;
if (0 > headJointIndex || headJointIndex >= _rig->getJointStateCount()) {
qCWarning(interfaceapp) << "Bad head joint! Got:" << headJointIndex << "jointCount:" << _rig->getJointStateCount();
}
@ -52,7 +52,7 @@ void SkeletonModel::initJointStates() {
getEyeModelPositions(leftEyePosition, rightEyePosition);
glm::vec3 midEyePosition = (leftEyePosition + rightEyePosition) / 2.0f;
int rootJointIndex = _geometry->getFBXGeometry().rootJointIndex;
int rootJointIndex = geometry.rootJointIndex;
glm::vec3 rootModelPosition;
getJointPosition(rootJointIndex, rootModelPosition);
@ -87,10 +87,12 @@ Rig::CharacterControllerState convertCharacterControllerState(CharacterControlle
// Called within Model::simulate call, below.
void SkeletonModel::updateRig(float deltaTime, glm::mat4 parentTransform) {
const FBXGeometry& geometry = getFBXGeometry();
Head* head = _owningAvatar->getHead();
if (_owningAvatar->isMyAvatar()) {
MyAvatar* myAvatar = static_cast<MyAvatar*>(_owningAvatar);
const FBXGeometry& geometry = _geometry->getFBXGeometry();
Rig::HeadParameters headParams;
headParams.enableLean = qApp->isHMDMode();
@ -183,7 +185,6 @@ void SkeletonModel::updateRig(float deltaTime, glm::mat4 parentTransform) {
// Thus this should really only be ... else if (_owningAvatar->getHead()->isLookingAtMe()) {...
// However, in the !isLookingAtMe case, the eyes aren't rotating the way they should right now.
// We will revisit that as priorities allow, and particularly after the new rig/animation/joints.
const FBXGeometry& geometry = _geometry->getFBXGeometry();
// If the head is not positioned, updateEyeJoints won't get the math right
glm::quat headOrientation;
@ -329,22 +330,23 @@ float SkeletonModel::getRightArmLength() const {
}
bool SkeletonModel::getHeadPosition(glm::vec3& headPosition) const {
return isActive() && getJointPositionInWorldFrame(_geometry->getFBXGeometry().headJointIndex, headPosition);
return isActive() && getJointPositionInWorldFrame(getFBXGeometry().headJointIndex, headPosition);
}
bool SkeletonModel::getNeckPosition(glm::vec3& neckPosition) const {
return isActive() && getJointPositionInWorldFrame(_geometry->getFBXGeometry().neckJointIndex, neckPosition);
return isActive() && getJointPositionInWorldFrame(getFBXGeometry().neckJointIndex, neckPosition);
}
bool SkeletonModel::getLocalNeckPosition(glm::vec3& neckPosition) const {
return isActive() && getJointPosition(_geometry->getFBXGeometry().neckJointIndex, neckPosition);
return isActive() && getJointPosition(getFBXGeometry().neckJointIndex, neckPosition);
}
bool SkeletonModel::getEyeModelPositions(glm::vec3& firstEyePosition, glm::vec3& secondEyePosition) const {
if (!isActive()) {
return false;
}
const FBXGeometry& geometry = _geometry->getFBXGeometry();
const FBXGeometry& geometry = getFBXGeometry();
if (getJointPosition(geometry.leftEyeJointIndex, firstEyePosition) &&
getJointPosition(geometry.rightEyeJointIndex, secondEyePosition)) {
return true;
@ -386,11 +388,11 @@ float VERY_BIG_MASS = 1.0e6f;
// virtual
void SkeletonModel::computeBoundingShape() {
if (_geometry == NULL || _rig->jointStatesEmpty()) {
if (!isLoaded() || _rig->jointStatesEmpty()) {
return;
}
const FBXGeometry& geometry = _geometry->getFBXGeometry();
const FBXGeometry& geometry = getFBXGeometry();
if (geometry.joints.isEmpty() || geometry.rootJointIndex == -1) {
// rootJointIndex == -1 if the avatar model has no skeleton
return;
@ -429,7 +431,7 @@ void SkeletonModel::renderBoundingCollisionShapes(gpu::Batch& batch, float scale
}
bool SkeletonModel::hasSkeleton() {
return isActive() ? _geometry->getFBXGeometry().rootJointIndex != -1 : false;
return isActive() ? getFBXGeometry().rootJointIndex != -1 : false;
}
void SkeletonModel::onInvalidate() {

View file

@ -38,10 +38,10 @@ public:
void updateAttitude();
/// Returns the index of the left hand joint, or -1 if not found.
int getLeftHandJointIndex() const { return isActive() ? _geometry->getFBXGeometry().leftHandJointIndex : -1; }
int getLeftHandJointIndex() const { return isActive() ? getFBXGeometry().leftHandJointIndex : -1; }
/// Returns the index of the right hand joint, or -1 if not found.
int getRightHandJointIndex() const { return isActive() ? _geometry->getFBXGeometry().rightHandJointIndex : -1; }
int getRightHandJointIndex() const { return isActive() ? getFBXGeometry().rightHandJointIndex : -1; }
bool getLeftGrabPosition(glm::vec3& position) const;
bool getRightGrabPosition(glm::vec3& position) const;

View file

@ -43,7 +43,7 @@ void SoftAttachmentModel::updateClusterMatrices(glm::vec3 modelPosition, glm::qu
}
_needsUpdateClusterMatrices = false;
const FBXGeometry& geometry = _geometry->getFBXGeometry();
const FBXGeometry& geometry = getFBXGeometry();
glm::mat4 modelToWorld = glm::mat4_cast(modelOrientation);
for (int i = 0; i < _meshStates.size(); i++) {

View file

@ -23,6 +23,7 @@
#include "AddressManager.h"
#include "Application.h"
#include "InterfaceLogging.h"
#include "UserActivityLogger.h"
#include "MainWindow.h"
#ifdef HAS_BUGSPLAT
@ -102,11 +103,19 @@ int main(int argc, const char* argv[]) {
// Check OpenGL version.
// This is done separately from the main Application so that start-up and shut-down logic within the main Application is
// not made more complicated than it already is.
bool override = false;
QString glVersion;
{
OpenGLVersionChecker openGLVersionChecker(argc, const_cast<char**>(argv));
if (!openGLVersionChecker.isValidVersion()) {
qCDebug(interfaceapp, "Early exit due to OpenGL version.");
return 0;
bool valid = true;
glVersion = openGLVersionChecker.checkVersion(valid, override);
if (!valid) {
if (override) {
qCDebug(interfaceapp, "Running on insufficient OpenGL version: %s.", glVersion.toStdString().c_str());
} else {
qCDebug(interfaceapp, "Early exit due to OpenGL version.");
return 0;
}
}
}
@ -134,6 +143,22 @@ int main(int argc, const char* argv[]) {
QSettings::setDefaultFormat(QSettings::IniFormat);
Application app(argc, const_cast<char**>(argv), startupTime);
// If we failed the OpenGLVersion check, log it.
if (override) {
auto& accountManager = AccountManager::getInstance();
if (accountManager.isLoggedIn()) {
UserActivityLogger::getInstance().insufficientGLVersion(glVersion);
} else {
QObject::connect(&AccountManager::getInstance(), &AccountManager::loginComplete, [glVersion](){
static bool loggedInsufficientGL = false;
if (!loggedInsufficientGL) {
UserActivityLogger::getInstance().insufficientGLVersion(glVersion);
loggedInsufficientGL = true;
}
});
}
}
// Setup local server
QLocalServer server { &app };

View file

@ -18,22 +18,22 @@
QString const ModelOverlay::TYPE = "model";
ModelOverlay::ModelOverlay()
: _model(std::make_shared<Rig>()),
: _model(std::make_shared<Model>(std::make_shared<Rig>())),
_modelTextures(QVariantMap()),
_updateModel(false)
{
_model.init();
_model->init();
_isLoaded = false;
}
ModelOverlay::ModelOverlay(const ModelOverlay* modelOverlay) :
Volume3DOverlay(modelOverlay),
_model(std::make_shared<Rig>()),
_model(std::make_shared<Model>(std::make_shared<Rig>())),
_modelTextures(QVariantMap()),
_url(modelOverlay->_url),
_updateModel(false)
{
_model.init();
_model->init();
if (_url.isValid()) {
_updateModel = true;
_isLoaded = false;
@ -44,27 +44,27 @@ void ModelOverlay::update(float deltatime) {
if (_updateModel) {
_updateModel = false;
_model.setSnapModelToCenter(true);
_model.setScale(getDimensions());
_model.setRotation(getRotation());
_model.setTranslation(getPosition());
_model.setURL(_url);
_model.simulate(deltatime, true);
_model->setSnapModelToCenter(true);
_model->setScale(getDimensions());
_model->setRotation(getRotation());
_model->setTranslation(getPosition());
_model->setURL(_url);
_model->simulate(deltatime, true);
} else {
_model.simulate(deltatime);
_model->simulate(deltatime);
}
_isLoaded = _model.isActive();
_isLoaded = _model->isActive();
}
bool ModelOverlay::addToScene(Overlay::Pointer overlay, std::shared_ptr<render::Scene> scene, render::PendingChanges& pendingChanges) {
Volume3DOverlay::addToScene(overlay, scene, pendingChanges);
_model.addToScene(scene, pendingChanges);
_model->addToScene(scene, pendingChanges);
return true;
}
void ModelOverlay::removeFromScene(Overlay::Pointer overlay, std::shared_ptr<render::Scene> scene, render::PendingChanges& pendingChanges) {
Volume3DOverlay::removeFromScene(overlay, scene, pendingChanges);
_model.removeFromScene(scene, pendingChanges);
_model->removeFromScene(scene, pendingChanges);
}
void ModelOverlay::render(RenderArgs* args) {
@ -73,9 +73,9 @@ void ModelOverlay::render(RenderArgs* args) {
// fix them up in the scene
render::ScenePointer scene = qApp->getMain3DScene();
render::PendingChanges pendingChanges;
if (_model.needsFixupInScene()) {
_model.removeFromScene(scene, pendingChanges);
_model.addToScene(scene, pendingChanges);
if (_model->needsFixupInScene()) {
_model->removeFromScene(scene, pendingChanges);
_model->addToScene(scene, pendingChanges);
}
scene->enqueuePendingChanges(pendingChanges);
@ -100,7 +100,7 @@ void ModelOverlay::setProperties(const QVariantMap& properties) {
if (newScale.x <= 0 || newScale.y <= 0 || newScale.z <= 0) {
setDimensions(scale);
} else {
_model.setScaleToFit(true, getDimensions());
_model->setScaleToFit(true, getDimensions());
_updateModel = true;
}
}
@ -120,7 +120,7 @@ void ModelOverlay::setProperties(const QVariantMap& properties) {
QUrl newTextureURL = textureMap[key].toUrl();
qDebug() << "Updating texture named" << key << "to texture at URL" << newTextureURL;
QMetaObject::invokeMethod(&_model, "setTextureWithNameToURL", Qt::AutoConnection,
QMetaObject::invokeMethod(_model.get(), "setTextureWithNameToURL", Qt::AutoConnection,
Q_ARG(const QString&, key),
Q_ARG(const QUrl&, newTextureURL));
@ -134,7 +134,7 @@ QVariant ModelOverlay::getProperty(const QString& property) {
return _url.toString();
}
if (property == "dimensions" || property == "scale" || property == "size") {
return vec3toVariant(_model.getScaleToFitDimensions());
return vec3toVariant(_model->getScaleToFitDimensions());
}
if (property == "textures") {
if (_modelTextures.size() > 0) {
@ -155,13 +155,13 @@ bool ModelOverlay::findRayIntersection(const glm::vec3& origin, const glm::vec3&
float& distance, BoxFace& face, glm::vec3& surfaceNormal) {
QString subMeshNameTemp;
return _model.findRayIntersectionAgainstSubMeshes(origin, direction, distance, face, surfaceNormal, subMeshNameTemp);
return _model->findRayIntersectionAgainstSubMeshes(origin, direction, distance, face, surfaceNormal, subMeshNameTemp);
}
bool ModelOverlay::findRayIntersectionExtraInfo(const glm::vec3& origin, const glm::vec3& direction,
float& distance, BoxFace& face, glm::vec3& surfaceNormal, QString& extraInfo) {
return _model.findRayIntersectionAgainstSubMeshes(origin, direction, distance, face, surfaceNormal, extraInfo);
return _model->findRayIntersectionAgainstSubMeshes(origin, direction, distance, face, surfaceNormal, extraInfo);
}
ModelOverlay* ModelOverlay::createClone() const {

View file

@ -41,11 +41,11 @@ public:
private:
Model _model;
ModelPointer _model;
QVariantMap _modelTextures;
QUrl _url;
bool _updateModel;
};
#endif // hifi_ModelOverlay_h
#endif // hifi_ModelOverlay_h

View file

@ -1056,7 +1056,9 @@ void Rig::updateEyeJoint(int index, const glm::vec3& modelTranslation, const glm
// limit rotation
const float MAX_ANGLE = 30.0f * RADIANS_PER_DEGREE;
deltaQuat = glm::angleAxis(glm::clamp(glm::angle(deltaQuat), -MAX_ANGLE, MAX_ANGLE), glm::axis(deltaQuat));
if (fabsf(glm::angle(deltaQuat)) > MAX_ANGLE) {
deltaQuat = glm::angleAxis(glm::clamp(glm::angle(deltaQuat), -MAX_ANGLE, MAX_ANGLE), glm::axis(deltaQuat));
}
// directly set absolutePose rotation
_internalPoseSet._absolutePoses[index].rot = deltaQuat * headQuat;

View file

@ -221,6 +221,8 @@ public:
void setEnableInverseKinematics(bool enable);
const glm::mat4& getGeometryToRigTransform() const { return _geometryToRigTransform; }
protected:
bool isIndexValid(int index) const { return _animSkeleton && index >= 0 && index < _animSkeleton->getNumJoints(); }
void updateAnimationStateHandlers();

View file

@ -50,6 +50,9 @@
static const int RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES = 100;
static const auto DEFAULT_POSITION_GETTER = []{ return Vectors::ZERO; };
static const auto DEFAULT_ORIENTATION_GETTER = [] { return Quaternions::IDENTITY; };
Setting::Handle<bool> dynamicJitterBuffers("dynamicJitterBuffers", DEFAULT_DYNAMIC_JITTER_BUFFERS);
Setting::Handle<int> maxFramesOverDesired("maxFramesOverDesired", DEFAULT_MAX_FRAMES_OVER_DESIRED);
Setting::Handle<int> staticDesiredJitterBufferFrames("staticDesiredJitterBufferFrames",
@ -103,7 +106,9 @@ AudioClient::AudioClient() :
_outgoingAvatarAudioSequenceNumber(0),
_audioOutputIODevice(_receivedAudioStream, this),
_stats(&_receivedAudioStream),
_inputGate()
_inputGate(),
_positionGetter(DEFAULT_POSITION_GETTER),
_orientationGetter(DEFAULT_ORIENTATION_GETTER)
{
// clear the array of locally injected samples
memset(_localProceduralSamples, 0, AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL);
@ -550,7 +555,7 @@ void AudioClient::configureReverb() {
p.wetDryMix = 100.0f;
p.preDelay = 0.0f;
p.earlyGain = -96.0f; // disable ER
p.lateGain -= 12.0f; // quieter than listener reverb
p.lateGain += _reverbOptions->getWetDryMix() * (24.0f/100.0f) - 24.0f; // -0dB to -24dB, based on wetDryMix
p.lateMixLeft = 0.0f;
p.lateMixRight = 0.0f;
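As a quick worked example of the new lateGain term above: with wetDryMix = 0 it adds 0 * 24/100 - 24 = -24 dB, with wetDryMix = 50 it adds -12 dB, and with wetDryMix = 100 it adds 0 dB, so the local reverb level scales linearly from -24 dB up to the listener level as the wet/dry mix increases.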

View file

@ -19,9 +19,7 @@ const QString Basic2DWindowOpenGLDisplayPlugin::NAME("Desktop");
static const QString FULLSCREEN = "Fullscreen";
void Basic2DWindowOpenGLDisplayPlugin::internalActivate() {
Parent::internalActivate();
bool Basic2DWindowOpenGLDisplayPlugin::internalActivate() {
_framerateActions.clear();
_container->addMenuItem(PluginType::DISPLAY_PLUGIN, MENU_PATH(), FULLSCREEN,
[this](bool clicked) {
@ -33,6 +31,8 @@ void Basic2DWindowOpenGLDisplayPlugin::internalActivate() {
}, true, false);
updateFramerate();
return Parent::internalActivate();
}
void Basic2DWindowOpenGLDisplayPlugin::submitSceneTexture(uint32_t frameIndex, const gpu::TexturePointer& sceneTexture) {

View file

@ -22,7 +22,7 @@ public:
virtual float getTargetFrameRate() override { return _framerateTarget ? (float) _framerateTarget : TARGET_FRAMERATE_Basic2DWindowOpenGL; }
virtual void internalActivate() override;
virtual bool internalActivate() override;
virtual void submitSceneTexture(uint32_t frameIndex, const gpu::TexturePointer& sceneTexture) override;

View file

@ -219,7 +219,7 @@ void OpenGLDisplayPlugin::cleanupForSceneTexture(const gpu::TexturePointer& scen
}
void OpenGLDisplayPlugin::activate() {
bool OpenGLDisplayPlugin::activate() {
if (!_cursorsData.size()) {
auto& cursorManager = Cursor::Manager::instance();
for (const auto iconId : cursorManager.registeredIcons()) {
@ -238,7 +238,9 @@ void OpenGLDisplayPlugin::activate() {
// Child classes may override this in order to do things like initialize
// libraries, etc
internalActivate();
if (!internalActivate()) {
return false;
}
#if THREADED_PRESENT
@ -263,7 +265,8 @@ void OpenGLDisplayPlugin::activate() {
customizeContext();
_container->makeRenderingContextCurrent();
#endif
DisplayPlugin::activate();
return DisplayPlugin::activate();
}
void OpenGLDisplayPlugin::deactivate() {

View file

@ -32,7 +32,7 @@ public:
// These must be final to ensure proper ordering of operations
// between the main thread and the presentation thread
void activate() override final;
bool activate() override final;
void deactivate() override final;
bool eventFilter(QObject* receiver, QEvent* event) override;
@ -77,7 +77,8 @@ protected:
virtual void customizeContext();
virtual void uncustomizeContext();
virtual void internalActivate() {}
// Returns true on successful activation
virtual bool internalActivate() { return true; }
virtual void internalDeactivate() {}
virtual void cleanupForSceneTexture(const gpu::TexturePointer& sceneTexture);
// Plugin specific functionality to send the composed scene to the output window or device

View file

@ -32,7 +32,7 @@ glm::uvec2 HmdDisplayPlugin::getRecommendedUiSize() const {
return CompositorHelper::VIRTUAL_SCREEN_SIZE;
}
void HmdDisplayPlugin::internalActivate() {
bool HmdDisplayPlugin::internalActivate() {
_monoPreview = _container->getBoolSetting("monoPreview", DEFAULT_MONO_VIEW);
_container->addMenuItem(PluginType::DISPLAY_PLUGIN, MENU_PATH(), MONO_PREVIEW,
@ -41,7 +41,8 @@ void HmdDisplayPlugin::internalActivate() {
_container->setBoolSetting("monoPreview", _monoPreview);
}, true, _monoPreview);
_container->removeMenu(FRAMERATE);
Parent::internalActivate();
return Parent::internalActivate();
}
void HmdDisplayPlugin::customizeContext() {

View file

@ -33,7 +33,7 @@ protected:
virtual bool isHmdMounted() const = 0;
virtual void postPreview() {};
void internalActivate() override;
bool internalActivate() override;
void compositeOverlay() override;
void compositePointer() override;
void internalPresent() override;

View file

@ -58,7 +58,7 @@ glm::mat4 StereoDisplayPlugin::getEyeProjection(Eye eye, const glm::mat4& basePr
static const QString FRAMERATE = DisplayPlugin::MENU_PATH() + ">Framerate";
std::vector<QAction*> _screenActions;
void StereoDisplayPlugin::internalActivate() {
bool StereoDisplayPlugin::internalActivate() {
auto screens = qApp->screens();
_screenActions.resize(screens.size());
for (int i = 0; i < screens.size(); ++i) {
@ -77,7 +77,8 @@ void StereoDisplayPlugin::internalActivate() {
_screen = qApp->primaryScreen();
_container->setFullscreen(_screen);
Parent::internalActivate();
return Parent::internalActivate();
}
void StereoDisplayPlugin::updateScreen() {

View file

@ -29,7 +29,7 @@ public:
// virtual glm::mat4 getEyeToHeadTransform(Eye eye) const override;
protected:
virtual void internalActivate() override;
virtual bool internalActivate() override;
virtual void internalDeactivate() override;
void updateScreen();
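Across the display plugins above, internalActivate() now returns a bool; a hedged sketch of the contract (MyDisplayPlugin and openDevice() are hypothetical, not part of this change):

// Illustrative only: a plugin can now abort activation cleanly instead of
// continuing in a broken state.
bool MyDisplayPlugin::internalActivate() {
    if (!openDevice()) {            // hypothetical device/library setup that can fail
        return false;               // OpenGLDisplayPlugin::activate() then stops and returns false
    }
    return Parent::internalActivate();
}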

View file

@ -421,8 +421,8 @@ const FBXGeometry* EntityTreeRenderer::getGeometryForEntity(EntityItemPointer en
std::dynamic_pointer_cast<RenderableModelEntityItem>(entityItem);
assert(modelEntityItem); // we need this!!!
ModelPointer model = modelEntityItem->getModel(this);
if (model) {
result = &model->getGeometry()->getFBXGeometry();
if (model && model->isLoaded()) {
result = &model->getFBXGeometry();
}
}
return result;
@ -446,11 +446,8 @@ const FBXGeometry* EntityTreeRenderer::getCollisionGeometryForEntity(EntityItemP
std::dynamic_pointer_cast<RenderableModelEntityItem>(entityItem);
if (modelEntityItem->hasCompoundShapeURL()) {
ModelPointer model = modelEntityItem->getModel(this);
if (model) {
const QSharedPointer<NetworkGeometry> collisionNetworkGeometry = model->getCollisionGeometry();
if (collisionNetworkGeometry && collisionNetworkGeometry->isLoaded()) {
result = &collisionNetworkGeometry->getFBXGeometry();
}
if (model && model->isCollisionLoaded()) {
result = &model->getCollisionFBXGeometry();
}
}
}
@ -463,14 +460,17 @@ void EntityTreeRenderer::processEraseMessage(ReceivedMessage& message, const Sha
ModelPointer EntityTreeRenderer::allocateModel(const QString& url, const QString& collisionUrl) {
ModelPointer model = nullptr;
// Make sure we only create and delete models on the thread that owns the EntityTreeRenderer
// Only create and delete models on the thread that owns the EntityTreeRenderer
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "allocateModel", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(ModelPointer, model),
Q_ARG(const QString&, url));
Q_ARG(const QString&, url),
Q_ARG(const QString&, collisionUrl));
return model;
}
model = std::make_shared<Model>(std::make_shared<Rig>());
model->init();
model->setURL(QUrl(url));
@ -478,37 +478,20 @@ ModelPointer EntityTreeRenderer::allocateModel(const QString& url, const QString
return model;
}
ModelPointer EntityTreeRenderer::updateModel(ModelPointer original, const QString& newUrl, const QString& collisionUrl) {
ModelPointer model = nullptr;
// The caller shouldn't call us if the URL doesn't need to change. But if they
// do, we just return their original back to them.
if (!original || (QUrl(newUrl) == original->getURL())) {
return original;
}
// Before we do any creating or deleting, make sure we're on our renderer thread
ModelPointer EntityTreeRenderer::updateModel(ModelPointer model, const QString& newUrl, const QString& collisionUrl) {
// Only create and delete models on the thread that owns the EntityTreeRenderer
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "updateModel", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(ModelPointer, model),
Q_ARG(ModelPointer, original),
Q_ARG(const QString&, newUrl));
Q_ARG(ModelPointer, model),
Q_ARG(const QString&, newUrl),
Q_ARG(const QString&, collisionUrl));
return model;
}
// at this point we know we need to replace the model, and we know we're on the
// correct thread, so we can do all our work.
if (original) {
original.reset(); // delete the old model...
}
// create the model and correctly initialize it with the new url
model = std::make_shared<Model>(std::make_shared<Rig>());
model->init();
model->setURL(QUrl(newUrl));
model->setCollisionModelURL(QUrl(collisionUrl));
return model;
}

View file

@ -69,14 +69,10 @@ void RenderableModelEntityItem::loader() {
_needsModelReload = true;
EntityTreeRenderer* renderer = DependencyManager::get<EntityTreeRenderer>().data();
assert(renderer);
if (!_model || _needsModelReload) {
{
PerformanceTimer perfTimer("getModel");
getModel(renderer);
}
if (_model) {
_model->setURL(getParsedModelURL());
_model->setCollisionModelURL(QUrl(getCompoundShapeURL()));
}
}
void RenderableModelEntityItem::setDimensions(const glm::vec3& value) {
@ -109,8 +105,8 @@ int RenderableModelEntityItem::readEntitySubclassDataFromBuffer(const unsigned c
QVariantMap RenderableModelEntityItem::parseTexturesToMap(QString textures) {
// If textures are unset, revert to original textures
if (textures == "") {
return _originalTexturesMap;
if (textures.isEmpty()) {
return _originalTextures;
}
// Legacy: a ,\n-delimited list of filename:"texturepath"
@ -121,10 +117,11 @@ QVariantMap RenderableModelEntityItem::parseTexturesToMap(QString textures) {
QJsonParseError error;
QJsonDocument texturesJson = QJsonDocument::fromJson(textures.toUtf8(), &error);
if (error.error != QJsonParseError::NoError) {
qCWarning(entitiesrenderer) << "Could not evaluate textures property value:" << _textures;
return _originalTexturesMap;
qCWarning(entitiesrenderer) << "Could not evaluate textures property value:" << textures;
return _originalTextures;
}
return texturesJson.object().toVariantMap();
return texturesJson.toVariant().toMap();
}
void RenderableModelEntityItem::remapTextures() {
@ -135,44 +132,29 @@ void RenderableModelEntityItem::remapTextures() {
if (!_model->isLoaded()) {
return; // nothing to do if the model has not yet loaded
}
auto& geometry = _model->getGeometry()->getGeometry();
if (!_originalTexturesRead) {
const QSharedPointer<NetworkGeometry>& networkGeometry = _model->getGeometry();
if (networkGeometry) {
_originalTextures = networkGeometry->getTextureNames();
_originalTexturesMap = parseTexturesToMap(_originalTextures.join(",\n"));
_originalTexturesRead = true;
}
}
if (_currentTextures == _textures) {
return; // nothing to do if our recently mapped textures match our desired textures
}
// since we're changing here, we need to run through our current texture map
// and any textures in the recently mapped texture, that is not in our desired
// textures, we need to "unset"
QVariantMap currentTextureMap = parseTexturesToMap(_currentTextures);
QVariantMap textureMap = parseTexturesToMap(_textures);
_originalTextures = geometry->getTextures();
_originalTexturesRead = true;
foreach(const QString& key, currentTextureMap.keys()) {
// if the desired texture map (what we're setting the textures to) doesn't
// contain this texture, then remove it by setting the URL to null
if (!textureMap.contains(key)) {
QUrl noURL;
qCDebug(entitiesrenderer) << "Removing texture named" << key << "by replacing it with no URL";
_model->setTextureWithNameToURL(key, noURL);
}
// Default to _originalTextures to avoid remapping immediately and lagging on load
_currentTextures = _originalTextures;
}
// here's where we remap any textures if needed...
foreach(const QString& key, textureMap.keys()) {
QUrl newTextureURL = textureMap[key].toUrl();
qCDebug(entitiesrenderer) << "Updating texture named" << key << "to texture at URL" << newTextureURL;
_model->setTextureWithNameToURL(key, newTextureURL);
auto textures = getTextures();
if (textures == _lastTextures) {
return;
}
_lastTextures = textures;
auto newTextures = parseTexturesToMap(textures);
if (newTextures != _currentTextures) {
geometry->setTextures(newTextures);
_currentTextures = newTextures;
}
_currentTextures = _textures;
}
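For illustration, the shape of the textures value that parseTexturesToMap() now expects (texture names and URLs are made up):

// Illustrative only: the textures property is parsed as a JSON object rather than
// the legacy comma/newline-delimited list, e.g.
//   { "Tex.Head": "http://example.com/head.png", "Tex.Body": "http://example.com/body.png" }
// An empty string falls back to _originalTextures, and remapTextures() only pushes the
// parsed map to the geometry when it differs from _currentTextures.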
// TODO: we need a solution for changes to the position/rotation/etc of a model...
@ -385,13 +367,6 @@ void RenderableModelEntityItem::render(RenderArgs* args) {
if (hasModel()) {
if (_model) {
// check if the URL has changed
auto& currentURL = getParsedModelURL();
if (currentURL != _model->getURL()) {
qCDebug(entitiesrenderer).noquote() << "Updating model URL: " << currentURL.toDisplayString();
_model->setURL(currentURL);
}
render::ScenePointer scene = AbstractViewStateInterface::instance()->getMain3DScene();
// check to see if when we added our models to the scene they were ready, if they were not ready, then
@ -456,6 +431,15 @@ void RenderableModelEntityItem::render(RenderArgs* args) {
}
});
updateModelBounds();
// Check if the URL has changed
// Do this last, as getModel is queued for the next frame,
// and we need to keep the state that directs the model to reinitialize
auto& currentURL = getParsedModelURL();
if (currentURL != _model->getURL()) {
// Defer setting the url to the render thread
getModel(_myRenderer);
}
}
}
} else {
@ -471,10 +455,8 @@ void RenderableModelEntityItem::render(RenderArgs* args) {
}
ModelPointer RenderableModelEntityItem::getModel(EntityTreeRenderer* renderer) {
ModelPointer result = nullptr;
if (!renderer) {
return result;
return nullptr;
}
// make sure our renderer is setup
@ -489,21 +471,22 @@ ModelPointer RenderableModelEntityItem::getModel(EntityTreeRenderer* renderer) {
_needsModelReload = false; // this is the reload
// if we have a URL, then we will want to end up returning a model...
// If we have a URL, then we will want to end up returning a model...
if (!getModelURL().isEmpty()) {
// if we have a previously allocated model, but its URL doesn't match
// then we need to let our renderer update our model for us.
if (_model && (QUrl(getModelURL()) != _model->getURL() ||
QUrl(getCompoundShapeURL()) != _model->getCollisionURL())) {
result = _model = _myRenderer->updateModel(_model, getModelURL(), getCompoundShapeURL());
// If we don't have a model, allocate one *immediately*
if (!_model) {
_model = _myRenderer->allocateModel(getModelURL(), getCompoundShapeURL());
_needsInitialSimulation = true;
} else if (!_model) { // if we don't yet have a model, then we want our renderer to allocate one
result = _model = _myRenderer->allocateModel(getModelURL(), getCompoundShapeURL());
// If we need to change URLs, update it *after rendering* (to avoid access violations)
} else if ((QUrl(getModelURL()) != _model->getURL() || QUrl(getCompoundShapeURL()) != _model->getCollisionURL())) {
QMetaObject::invokeMethod(_myRenderer, "updateModel", Qt::QueuedConnection,
Q_ARG(ModelPointer, _model),
Q_ARG(const QString&, getModelURL()),
Q_ARG(const QString&, getCompoundShapeURL()));
_needsInitialSimulation = true;
} else { // we already have the model we want...
result = _model;
}
// Else we can just return the _model
// If we have no URL, then we can delete any model we do have...
} else if (_model) {
// remove from scene
render::ScenePointer scene = AbstractViewStateInterface::instance()->getMain3DScene();
@ -513,11 +496,11 @@ ModelPointer RenderableModelEntityItem::getModel(EntityTreeRenderer* renderer) {
// release interest
_myRenderer->releaseModel(_model);
result = _model = nullptr;
_model = nullptr;
_needsInitialSimulation = true;
}
return result;
return _model;
}
bool RenderableModelEntityItem::needsToCallUpdate() const {
@ -526,8 +509,7 @@ bool RenderableModelEntityItem::needsToCallUpdate() const {
void RenderableModelEntityItem::update(const quint64& now) {
if (!_dimensionsInitialized && _model && _model->isActive()) {
const QSharedPointer<NetworkGeometry> renderNetworkGeometry = _model->getGeometry();
if (renderNetworkGeometry && renderNetworkGeometry->isLoaded()) {
if (_model->isLoaded()) {
EntityItemProperties properties;
auto extents = _model->getMeshExtents();
properties.setDimensions(extents.maximum - extents.minimum);
@ -593,13 +575,8 @@ bool RenderableModelEntityItem::isReadyToComputeShape() {
return false;
}
const QSharedPointer<NetworkGeometry> collisionNetworkGeometry = _model->getCollisionGeometry();
const QSharedPointer<NetworkGeometry> renderNetworkGeometry = _model->getGeometry();
if ((collisionNetworkGeometry && collisionNetworkGeometry->isLoaded()) &&
(renderNetworkGeometry && renderNetworkGeometry->isLoaded())) {
if (_model->isLoaded() && _model->isCollisionLoaded()) {
// we have both URLs AND both geometries AND they are both fully loaded.
if (_needsInitialSimulation) {
// the _model's offset will be wrong until _needsInitialSimulation is false
PerformanceTimer perfTimer("_model->simulate");
@ -624,15 +601,12 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& info) {
adjustShapeInfoByRegistration(info);
} else {
updateModelBounds();
const QSharedPointer<NetworkGeometry> collisionNetworkGeometry = _model->getCollisionGeometry();
// should never fall in here when the collision model is not fully loaded
// hence we assert collisionNetworkGeometry is not NULL
assert(collisionNetworkGeometry);
const FBXGeometry& collisionGeometry = collisionNetworkGeometry->getFBXGeometry();
const QSharedPointer<NetworkGeometry> renderNetworkGeometry = _model->getGeometry();
const FBXGeometry& renderGeometry = renderNetworkGeometry->getFBXGeometry();
// hence we assert that all geometries exist and are loaded
assert(_model->isLoaded() && _model->isCollisionLoaded());
const FBXGeometry& renderGeometry = _model->getFBXGeometry();
const FBXGeometry& collisionGeometry = _model->getCollisionFBXGeometry();
_points.clear();
unsigned int i = 0;
@ -734,10 +708,8 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& info) {
}
bool RenderableModelEntityItem::contains(const glm::vec3& point) const {
if (EntityItem::contains(point) && _model && _model->getCollisionGeometry()) {
const QSharedPointer<NetworkGeometry> collisionNetworkGeometry = _model->getCollisionGeometry();
const FBXGeometry& collisionGeometry = collisionNetworkGeometry->getFBXGeometry();
return collisionGeometry.convexHullContains(worldToEntity(point));
if (EntityItem::contains(point) && _model && _model->isCollisionLoaded()) {
return _model->getCollisionFBXGeometry().convexHullContains(worldToEntity(point));
}
return false;

View file

@ -90,9 +90,9 @@ private:
bool _needsInitialSimulation = true;
bool _needsModelReload = true;
EntityTreeRenderer* _myRenderer = nullptr;
QString _currentTextures;
QStringList _originalTextures;
QVariantMap _originalTexturesMap;
QString _lastTextures;
QVariantMap _currentTextures;
QVariantMap _originalTextures;
bool _originalTexturesRead = false;
QVector<QVector<glm::vec3>> _points;
bool _dimensionsInitialized = true;

View file

@ -55,8 +55,8 @@ bool RenderableWebEntityItem::buildWebSurface(EntityTreeRenderer* renderer) {
qWarning() << "Too many concurrent web views to create new view";
return false;
}
qDebug() << "Building web surface";
++_currentWebCount;
// Save the original GL context, because creating a QML surface will create a new context
QOpenGLContext * currentContext = QOpenGLContext::currentContext();

View file

@ -1010,6 +1010,10 @@ EntityTreePointer EntityItem::getTree() const {
return tree;
}
SpatialParentTree* EntityItem::getParentTree() const {
return getTree().get();
}
bool EntityItem::wantTerseEditLogging() const {
EntityTreePointer tree = getTree();
return tree ? tree->wantTerseEditLogging() : false;
@ -1978,3 +1982,25 @@ void EntityItem::dimensionsChanged() {
requiresRecalcBoxes();
SpatiallyNestable::dimensionsChanged(); // Do what you have to do
}
void EntityItem::globalizeProperties(EntityItemProperties& properties, const QString& messageTemplate, const glm::vec3& offset) const {
bool success;
auto globalPosition = getPosition(success);
if (success) {
properties.setPosition(globalPosition + offset);
properties.setRotation(getRotation());
properties.setDimensions(getDimensions());
// Should we do velocities and accelerations, too? This could end up being quite involved, which is why the method exists.
} else {
properties.setPosition(getQueryAACube().calcCenter() + offset); // best we can do
}
if (!messageTemplate.isEmpty()) {
QString name = properties.getName();
if (name.isEmpty()) {
name = EntityTypes::getEntityTypeName(properties.getType());
}
qCWarning(entities) << messageTemplate.arg(getEntityItemID().toString()).arg(name).arg(properties.getParentID().toString());
}
QUuid empty;
properties.setParentID(empty);
}
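A hedged usage sketch (entity and offset are placeholders; the message template mirrors the one used by sendEntitiesOperation further down):

// Illustrative only: detach an entity from a parent that cannot be resolved and
// convert its transform to absolute values before re-sending it.
EntityItemProperties properties = entity->getProperties();
entity->globalizeProperties(properties, "Cannot find %3 parent of %2 %1", offset);
// properties now carries a null parentID and a world-space position (plus offset).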

View file

@ -86,6 +86,8 @@ public:
/// returns true if something changed
virtual bool setProperties(const EntityItemProperties& properties);
// Update properties with an empty parent ID and globalized/absolute values (applying offset); if messageTemplate is non-empty, log it with the entity ID, name-or-type, and parent ID as arguments.
void globalizeProperties(EntityItemProperties& properties, const QString& messageTemplate = QString(), const glm::vec3& offset = glm::vec3(0.0f)) const;
/// Override this in your derived class if you'd like to be informed when something about the state of the entity
/// has changed. This will be called with properties change or when new data is loaded from a stream
@ -359,6 +361,7 @@ public:
void setPhysicsInfo(void* data) { _physicsInfo = data; }
EntityTreeElementPointer getElement() const { return _element; }
EntityTreePointer getTree() const;
virtual SpatialParentTree* getParentTree() const;
bool wantTerseEditLogging() const;
glm::mat4 getEntityToWorldMatrix() const;

View file

@ -529,9 +529,9 @@ QScriptValue EntityItemProperties::copyToScriptValue(QScriptEngine* engine, bool
COPY_PROPERTY_TO_QSCRIPTVALUE_GETTER_NO_SKIP(boundingBox, boundingBox); // gettable, but not settable
}
QString textureNamesList = _textureNames.join(",\n");
QString textureNamesStr = QJsonDocument::fromVariant(_textureNames).toJson();
if (!skipDefaults) {
COPY_PROPERTY_TO_QSCRIPTVALUE_GETTER_NO_SKIP(originalTextures, textureNamesList); // gettable, but not settable
COPY_PROPERTY_TO_QSCRIPTVALUE_GETTER_NO_SKIP(originalTextures, textureNamesStr); // gettable, but not settable
}
COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_PARENT_ID, parentID);

View file

@ -245,8 +245,8 @@ public:
const glm::vec3& getNaturalPosition() const { return _naturalPosition; }
void calculateNaturalPosition(const glm::vec3& min, const glm::vec3& max);
const QStringList& getTextureNames() const { return _textureNames; }
void setTextureNames(const QStringList& value) { _textureNames = value; }
const QVariantMap& getTextureNames() const { return _textureNames; }
void setTextureNames(const QVariantMap& value) { _textureNames = value; }
QString getSimulatorIDAsString() const { return _simulationOwner.getID().toString().mid(1,36).toUpper(); }
@ -297,7 +297,7 @@ private:
// NOTE: The following are pseudo client only properties. They are only used in clients which can access
// properties of model geometry. But these properties are not serialized like other properties.
QVector<SittingPoint> _sittingPoints;
QStringList _textureNames;
QVariantMap _textureNames;
glm::vec3 _naturalDimensions;
glm::vec3 _naturalPosition;

View file

@ -24,7 +24,6 @@
#include "EntitiesLogging.h"
#include "RecurseOctreeToMapOperator.h"
#include "LogHandler.h"
#include "RemapIDOperator.h"
static const quint64 DELETED_ENTITIES_EXTRA_USECS_TO_CONSIDER = USECS_PER_MSEC * 50;
@ -1317,42 +1316,59 @@ QVector<EntityItemID> EntityTree::sendEntities(EntityEditPacketSender* packetSen
float x, float y, float z) {
SendEntitiesOperationArgs args;
args.packetSender = packetSender;
args.localTree = localTree;
args.ourTree = this;
args.otherTree = localTree;
args.root = glm::vec3(x, y, z);
QVector<EntityItemID> newEntityIDs;
args.newEntityIDs = &newEntityIDs;
// If this is called repeatedly (e.g., multiple pastes with the same data), the new elements will clash unless we use new identifiers.
// We need to keep a map so that we can map parent identifiers correctly.
QHash<EntityItemID, EntityItemID> map;
args.map = &map;
recurseTreeWithOperation(sendEntitiesOperation, &args);
packetSender->releaseQueuedMessages();
return newEntityIDs;
return map.values().toVector();
}
bool EntityTree::sendEntitiesOperation(OctreeElementPointer element, void* extraData) {
SendEntitiesOperationArgs* args = static_cast<SendEntitiesOperationArgs*>(extraData);
EntityTreeElementPointer entityTreeElement = std::static_pointer_cast<EntityTreeElement>(element);
entityTreeElement->forEachEntity([&](EntityItemPointer entityItem) {
EntityItemID newID(QUuid::createUuid());
args->newEntityIDs->append(newID);
EntityItemProperties properties = entityItem->getProperties();
properties.setPosition(properties.getPosition() + args->root);
std::function<const EntityItemID(EntityItemPointer&)> getMapped = [&](EntityItemPointer& item) -> const EntityItemID {
EntityItemID oldID = item->getEntityItemID();
if (args->map->contains(oldID)) { // Already been handled (e.g., as a parent of somebody that we've processed).
return args->map->value(oldID);
}
EntityItemID newID = QUuid::createUuid();
EntityItemProperties properties = item->getProperties();
EntityItemID oldParentID = properties.getParentID();
if (oldParentID.isInvalidID()) { // no parent
properties.setPosition(properties.getPosition() + args->root);
} else {
EntityItemPointer parentEntity = args->ourTree->findEntityByEntityItemID(oldParentID);
if (parentEntity) { // map the parent
// Warning: (non-tail) recursion of getMapped could blow the call stack if the parent hierarchy is VERY deep.
properties.setParentID(getMapped(parentEntity));
// But do not add root offset in this case.
} else { // Should not happen, but let's try to be helpful...
item->globalizeProperties(properties, "Cannot find %3 parent of %2 %1", args->root);
}
}
properties.markAllChanged(); // so the entire property set is considered new, since we're making a new entity
// queue the packet to send to the server
args->packetSender->queueEditEntityMessage(PacketType::EntityAdd, newID, properties);
// also update the local tree instantly (note: this is not our tree, but an alternate tree)
if (args->localTree) {
args->localTree->withWriteLock([&] {
args->localTree->addEntity(newID, properties);
if (args->otherTree) {
args->otherTree->withWriteLock([&] {
args->otherTree->addEntity(newID, properties);
});
}
});
return true;
}
args->map->insert(oldID, newID);
return newID;
};
void EntityTree::remapIDs() {
RemapIDOperator theOperator;
recurseTreeWithOperator(&theOperator);
entityTreeElement->forEachEntity(getMapped);
return true;
}
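As a concrete (hypothetical) example of the mapping above: pasting a parent P with a child C allocates fresh IDs P' and C'; getMapped() recursively resolves C's parent so that C' ends up with parentID = P', and only the unparented P' has args->root added to its position, so the pasted hierarchy keeps its internal offsets while the whole group moves to the paste location.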
bool EntityTree::writeToMap(QVariantMap& entityDescription, OctreeElementPointer element, bool skipDefaultValues,
@ -1393,7 +1409,6 @@ bool EntityTree::readFromMap(QVariantMap& map) {
qCDebug(entities) << "adding Entity failed:" << entityItemID << properties.getType();
}
}
return true;
}

View file

@ -16,6 +16,7 @@
#include <QVector>
#include <Octree.h>
#include <SpatialParentFinder.h>
class EntityTree;
typedef std::shared_ptr<EntityTree> EntityTreePointer;
@ -46,13 +47,14 @@ public:
class SendEntitiesOperationArgs {
public:
glm::vec3 root;
EntityTreePointer localTree;
EntityTree* ourTree;
EntityTreePointer otherTree;
EntityEditPacketSender* packetSender;
QVector<EntityItemID>* newEntityIDs;
QHash<EntityItemID, EntityItemID>* map;
};
class EntityTree : public Octree {
class EntityTree : public Octree, public SpatialParentTree {
Q_OBJECT
public:
EntityTree(bool shouldReaverage = false);
@ -125,6 +127,7 @@ public:
EntityItemPointer findClosestEntity(glm::vec3 position, float targetRadius);
EntityItemPointer findEntityByID(const QUuid& id);
EntityItemPointer findEntityByEntityItemID(const EntityItemID& entityID);
virtual SpatiallyNestablePointer findByID(const QUuid& id) { return findEntityByID(id); }
EntityItemID assignEntityID(const EntityItemID& entityItemID); /// Assigns a known ID for a creator token ID
@ -200,8 +203,6 @@ public:
bool wantTerseEditLogging() const { return _wantTerseEditLogging; }
void setWantTerseEditLogging(bool value) { _wantTerseEditLogging = value; }
void remapIDs();
virtual bool writeToMap(QVariantMap& entityDescription, OctreeElementPointer element, bool skipDefaultValues,
bool skipThoseWithBadParents) override;
virtual bool readFromMap(QVariantMap& entityDescription) override;

View file

@ -42,6 +42,17 @@ ModelEntityItem::ModelEntityItem(const EntityItemID& entityItemID) : EntityItem(
_color[0] = _color[1] = _color[2] = 0;
}
const QString ModelEntityItem::getTextures() const {
QReadLocker locker(&_texturesLock);
auto textures = _textures;
return textures;
}
void ModelEntityItem::setTextures(const QString& textures) {
QWriteLocker locker(&_texturesLock);
_textures = textures;
}
EntityItemProperties ModelEntityItem::getProperties(EntityPropertyFlags desiredProperties) const {
EntityItemProperties properties = EntityItem::getProperties(desiredProperties); // get the properties from our base class
COPY_ENTITY_PROPERTY_TO_PROPERTIES(color, getXColor);

View file

@ -110,8 +110,8 @@ public:
float getAnimationFPS() const { return _animationLoop.getFPS(); }
static const QString DEFAULT_TEXTURES;
const QString& getTextures() const { return _textures; }
void setTextures(const QString& textures) { _textures = textures; }
const QString getTextures() const;
void setTextures(const QString& textures);
virtual bool shouldBePhysical() const;
@ -159,7 +159,9 @@ protected:
AnimationPropertyGroup _animationProperties;
AnimationLoop _animationLoop;
mutable QReadWriteLock _texturesLock;
QString _textures;
ShapeType _shapeType = SHAPE_TYPE_NONE;
// used on client side

View file

@ -561,6 +561,12 @@ bool ParticleEffectEntityItem::needsToCallUpdate() const {
void ParticleEffectEntityItem::update(const quint64& now) {
// check whether 'now' is in the past, in case the user set their clock backward
if (now < _lastSimulated) {
_lastSimulated = now;
return;
}
float deltaTime = (float)(now - _lastSimulated) / (float)USECS_PER_SECOND;
_lastSimulated = now;

View file

@ -1,33 +0,0 @@
//
// RemapIDOperator.cpp
// libraries/entities/src
//
// Created by Seth Alves on 2015-12-6.
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "EntityTree.h"
#include "RemapIDOperator.h"
QUuid RemapIDOperator::remap(const QUuid& oldID) {
if (oldID.isNull()) {
return oldID;
}
if (!_oldToNew.contains(oldID)) {
_oldToNew[oldID] = QUuid::createUuid();
}
return _oldToNew[oldID];
}
bool RemapIDOperator::postRecursion(OctreeElementPointer element) {
EntityTreeElementPointer entityTreeElement = std::static_pointer_cast<EntityTreeElement>(element);
entityTreeElement->forEachEntity([&](EntityItemPointer entityItem) {
entityItem->setID(remap(entityItem->getID()));
entityItem->setParentID(remap(entityItem->getParentID()));
});
return true;
}

View file

@ -1,30 +0,0 @@
//
// RemapIDOperator.h
// libraries/entities/src
//
// Created by Seth Alves on 2015-12-6.
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_RemapIDOperator_h
#define hifi_RemapIDOperator_h
#include "Octree.h"
// this will change all the IDs in an EntityTree. Parent/Child relationships are maintained.
class RemapIDOperator : public RecurseOctreeOperator {
public:
RemapIDOperator() : RecurseOctreeOperator() {}
~RemapIDOperator() {}
virtual bool preRecursion(OctreeElementPointer element) { return true; }
virtual bool postRecursion(OctreeElementPointer element);
private:
QUuid remap(const QUuid& oldID);
QHash<QUuid, QUuid> _oldToNew;
};
#endif // hifi_RemapIDOperator_h

View file

@ -50,7 +50,9 @@ void GLWidget::initializeGL() {
// TODO: write the proper code for linux
makeCurrent();
#if defined(Q_OS_WIN)
_vsyncSupported = context()->contextHandle()->hasExtension("WGL_EXT_swap_control");
if (isValid() && context() && context()->contextHandle()) {
_vsyncSupported = context()->contextHandle()->hasExtension("WGL_EXT_swap_control");
}
#endif
}

View file

@ -70,7 +70,7 @@ public:
virtual bool event(QEvent *e) override;
protected:
class Queue : public QQueue<QEvent*> {
class Queue : private QQueue<QEvent*> {
public:
void add(QEvent::Type type);
QEvent* take();
@ -134,12 +134,14 @@ QEvent* OffscreenQmlRenderThread::Queue::take() {
}
OffscreenQmlRenderThread::OffscreenQmlRenderThread(OffscreenQmlSurface* surface, QOpenGLContext* shareContext) : _surface(surface) {
qDebug() << "Building QML Renderer: creating context";
if (!_canvas.create(shareContext)) {
static const char* error = "Failed to create OffscreenGLCanvas";
qWarning() << error;
throw error;
}
qDebug() << "Building QML Renderer: creating render control";
_renderControl = new QMyQuickRenderControl();
QQuickWindow::setDefaultAlphaBuffer(true);
// Create a QQuickWindow that is associated with our render control.
@ -147,19 +149,25 @@ OffscreenQmlRenderThread::OffscreenQmlRenderThread(OffscreenQmlSurface* surface,
// NOTE: Must be created on the main thread so that OffscreenQmlSurface can send it events
// NOTE: Must be created on the rendering thread or it will refuse to render,
// so we wait until after its ctor to move object/context to this thread.
qDebug() << "Building QML Renderer: creating window";
_quickWindow = new QQuickWindow(_renderControl);
_quickWindow->setColor(QColor(255, 255, 255, 0));
_quickWindow->setFlags(_quickWindow->flags() | static_cast<Qt::WindowFlags>(Qt::WA_TranslucentBackground));
// We can prepare, but we must wait to start() the thread until after the ctor
qDebug() << "Building QML Renderer: moving to own thread";
_renderControl->prepareThread(this);
_canvas.getContextObject()->moveToThread(this);
moveToThread(this);
qDebug() << "Building QML Renderer: complete";
_queue.add(INIT);
}
void OffscreenQmlRenderThread::run() {
qDebug() << "Starting QML Renderer thread";
while (!_quit) {
QEvent* e = _queue.take();
event(e);
@ -190,34 +198,32 @@ void OffscreenQmlRenderThread::setupFbo() {
using namespace oglplus;
_textures.setSize(_size);
// Before making any ogl calls, clear any outstanding errors
// FIXME: Something upstream is polluting the context with a GL_INVALID_ENUM,
// likely from glewExperimental = true
GLenum err = glGetError();
if (err != GL_NO_ERROR) {
qDebug() << "Clearing outstanding GL error to set up QML FBO:" << glewGetErrorString(err);
try {
_depthStencil.reset(new Renderbuffer());
Context::Bound(Renderbuffer::Target::Renderbuffer, *_depthStencil)
.Storage(
PixelDataInternalFormat::DepthComponent,
_size.x, _size.y);
_fbo.reset(new Framebuffer());
_fbo->Bind(Framebuffer::Target::Draw);
_fbo->AttachRenderbuffer(Framebuffer::Target::Draw,
FramebufferAttachment::Depth, *_depthStencil);
DefaultFramebuffer().Bind(Framebuffer::Target::Draw);
} catch (oglplus::Error& error) {
qWarning() << "OpenGL error in QML render setup: " << error.what();
}
_depthStencil.reset(new Renderbuffer());
Context::Bound(Renderbuffer::Target::Renderbuffer, *_depthStencil)
.Storage(
PixelDataInternalFormat::DepthComponent,
_size.x, _size.y);
_fbo.reset(new Framebuffer());
_fbo->Bind(Framebuffer::Target::Draw);
_fbo->AttachRenderbuffer(Framebuffer::Target::Draw,
FramebufferAttachment::Depth, *_depthStencil);
DefaultFramebuffer().Bind(Framebuffer::Target::Draw);
}
void OffscreenQmlRenderThread::init() {
qDebug() << "Initializing QML Renderer";
connect(_renderControl, &QQuickRenderControl::renderRequested, _surface, &OffscreenQmlSurface::requestRender);
connect(_renderControl, &QQuickRenderControl::sceneChanged, _surface, &OffscreenQmlSurface::requestUpdate);
if (!_canvas.makeCurrent()) {
// Failed to make the GL context current; this OffscreenQmlSurface is basically dead
qWarning("Failed to make context current on QML Renderer Thread");
_quit = true;
return;
}
@ -299,10 +305,21 @@ void OffscreenQmlRenderThread::render() {
try {
PROFILE_RANGE("qml_render")
TexturePtr texture = _textures.getNextTexture();
_fbo->Bind(Framebuffer::Target::Draw);
_fbo->AttachTexture(Framebuffer::Target::Draw, FramebufferAttachment::Color, *texture, 0);
_fbo->Complete(Framebuffer::Target::Draw);
TexturePtr texture = _textures.getNextTexture();
try {
_fbo->Bind(Framebuffer::Target::Draw);
_fbo->AttachTexture(Framebuffer::Target::Draw, FramebufferAttachment::Color, *texture, 0);
_fbo->Complete(Framebuffer::Target::Draw);
} catch (oglplus::Error& error) {
qWarning() << "OpenGL error in QML render: " << error.what();
// In case the failure came from a failed setupFbo, reset the FBO before the next render
setupFbo();
throw;
}
{
PROFILE_RANGE("qml_render->rendercontrol")
_renderControl->render();
@ -311,13 +328,14 @@ void OffscreenQmlRenderThread::render() {
// for now just clear the errors
glGetError();
}
// FIXME probably unnecessary
DefaultFramebuffer().Bind(Framebuffer::Target::Draw);
_quickWindow->resetOpenGLState();
_escrow.submit(GetName(*texture));
_lastRenderTime = usecTimestampNow();
} catch (std::runtime_error& error) {
qWarning() << "Failed to render QML " << error.what();
qWarning() << "Failed to render QML: " << error.what();
}
}
@ -352,6 +370,8 @@ void OffscreenQmlSurface::onAboutToQuit() {
}
void OffscreenQmlSurface::create(QOpenGLContext* shareContext) {
qDebug() << "Building QML surface";
_renderer = new OffscreenQmlRenderThread(this, shareContext);
_renderer->moveToThread(_renderer);
_renderer->setObjectName("QML Renderer Thread");

View file

@ -22,11 +22,26 @@ OpenGLVersionChecker::OpenGLVersionChecker(int& argc, char** argv) :
{
}
bool OpenGLVersionChecker::isValidVersion() {
bool valid = true;
QString OpenGLVersionChecker::checkVersion(bool& valid, bool& override) {
valid = true;
override = false;
// Retrieve OpenGL version
GLWidget* glWidget = new GLWidget();
valid = glWidget->isValid();
// Inform user if no OpenGL support
if (!valid) {
QMessageBox messageBox;
messageBox.setWindowTitle("Missing OpenGL Support");
messageBox.setIcon(QMessageBox::Warning);
messageBox.setText("Your system does not support OpenGL; Interface cannot run.");
messageBox.setInformativeText("Press OK to exit.");
messageBox.setStandardButtons(QMessageBox::Ok);
messageBox.setDefaultButton(QMessageBox::Ok);
messageBox.exec();
return QString();
}
// Retrieve OpenGL version
glWidget->initializeGL();
QString glVersion = QString((const char*)glGetString(GL_VERSION));
delete glWidget;
@ -54,8 +69,8 @@ bool OpenGLVersionChecker::isValidVersion() {
messageBox.setInformativeText("Press OK to exit; Ignore to continue.");
messageBox.setStandardButtons(QMessageBox::Ok | QMessageBox::Ignore);
messageBox.setDefaultButton(QMessageBox::Ok);
valid = messageBox.exec() == QMessageBox::Ignore;
override = messageBox.exec() == QMessageBox::Ignore;
}
return valid;
return glVersion;
}

View file

@ -19,7 +19,7 @@ class OpenGLVersionChecker : public QApplication {
public:
OpenGLVersionChecker(int& argc, char** argv);
static bool isValidVersion();
static QString checkVersion(bool& valid, bool& override);
};
#endif // hifi_OpenGLVersionChecker_h

View file

@ -74,6 +74,11 @@ void Context::downloadFramebuffer(const FramebufferPointer& srcFramebuffer, cons
_backend->downloadFramebuffer(srcFramebuffer, region, destImage);
}
void Context::getStats(ContextStats& stats) const {
_backend->getStats(stats);
}
const Backend::TransformCamera& Backend::TransformCamera::recomputeDerived(const Transform& xformView) const {
_projectionInverse = glm::inverse(_projection);
@ -102,3 +107,68 @@ Backend::TransformCamera Backend::TransformCamera::getEyeCamera(int eye, const S
return result;
}
// Counters for Buffer and Texture usage in GPU/Context
std::atomic<uint32_t> Context::_bufferGPUCount{ 0 };
std::atomic<Buffer::Size> Context::_bufferGPUMemoryUsage{ 0 };
std::atomic<uint32_t> Context::_textureGPUCount{ 0 };
std::atomic<Texture::Size> Context::_textureGPUMemoryUsage{ 0 };
void Context::incrementBufferGPUCount() {
_bufferGPUCount++;
}
void Context::decrementBufferGPUCount() {
_bufferGPUCount--;
}
void Context::updateBufferGPUMemoryUsage(Size prevObjectSize, Size newObjectSize) {
if (prevObjectSize == newObjectSize) {
return;
}
if (newObjectSize > prevObjectSize) {
_bufferGPUMemoryUsage.fetch_add(newObjectSize - prevObjectSize);
} else {
_bufferGPUMemoryUsage.fetch_sub(prevObjectSize - newObjectSize);
}
}
void Context::incrementTextureGPUCount() {
_textureGPUCount++;
}
void Context::decrementTextureGPUCount() {
_textureGPUCount--;
}
void Context::updateTextureGPUMemoryUsage(Size prevObjectSize, Size newObjectSize) {
if (prevObjectSize == newObjectSize) {
return;
}
if (newObjectSize > prevObjectSize) {
_textureGPUMemoryUsage.fetch_add(newObjectSize - prevObjectSize);
} else {
_textureGPUMemoryUsage.fetch_sub(prevObjectSize - newObjectSize);
}
}
uint32_t Context::getBufferGPUCount() {
return _bufferGPUCount.load();
}
Context::Size Context::getBufferGPUMemoryUsage() {
return _bufferGPUMemoryUsage.load();
}
uint32_t Context::getTextureGPUCount() {
return _textureGPUCount.load();
}
Context::Size Context::getTextureGPUMemoryUsage() {
return _textureGPUMemoryUsage.load();
}
void Backend::incrementBufferGPUCount() { Context::incrementBufferGPUCount(); }
void Backend::decrementBufferGPUCount() { Context::decrementBufferGPUCount(); }
void Backend::updateBufferGPUMemoryUsage(Resource::Size prevObjectSize, Resource::Size newObjectSize) { Context::updateBufferGPUMemoryUsage(prevObjectSize, newObjectSize); }
void Backend::incrementTextureGPUCount() { Context::incrementTextureGPUCount(); }
void Backend::decrementTextureGPUCount() { Context::decrementTextureGPUCount(); }
void Backend::updateTextureGPUMemoryUsage(Resource::Size prevObjectSize, Resource::Size newObjectSize) { Context::updateTextureGPUMemoryUsage(prevObjectSize, newObjectSize); }
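A hedged sketch of how a backend implementation is expected to drive these counters (MyBackend and the size arguments are hypothetical):

// Illustrative only: report a GPU buffer (re)allocation so that
// Context::getBufferGPUMemoryUsage() stays in sync with what was actually allocated;
// incrementBufferGPUCount()/decrementBufferGPUCount() bracket object creation and destruction.
void MyBackend::reportBufferRealloc(Resource::Size previousSize, Resource::Size newSize) {
    Backend::updateBufferGPUMemoryUsage(previousSize, newSize);
}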

View file

@ -27,6 +27,22 @@ class QImage;
namespace gpu {
struct ContextStats {
public:
int _ISNumFormatChanges = 0;
int _ISNumInputBufferChanges = 0;
int _ISNumIndexBufferChanges = 0;
int _RSNumTextureBounded = 0;
int _DSNumAPIDrawcalls = 0;
int _DSNumDrawcalls = 0;
int _DSNumTriangles = 0;
ContextStats() {}
ContextStats(const ContextStats& stats) = default;
};
struct StereoState {
bool _enable{ false };
bool _skybox{ false };
@ -100,13 +116,27 @@ public:
return reinterpret_cast<T*>(object.gpuObject.getGPUObject());
}
void getStats(ContextStats& stats) const { stats = _stats; }
// These should only be accessed by the Backend implementation to report the buffer and texture allocations;
// they are NOT public calls
static void incrementBufferGPUCount();
static void decrementBufferGPUCount();
static void updateBufferGPUMemoryUsage(Resource::Size prevObjectSize, Resource::Size newObjectSize);
static void incrementTextureGPUCount();
static void decrementTextureGPUCount();
static void updateTextureGPUMemoryUsage(Resource::Size prevObjectSize, Resource::Size newObjectSize);
protected:
StereoState _stereo;
ContextStats _stats;
};
class Context {
public:
using Size = Resource::Size;
typedef Backend* (*CreateBackend)();
typedef bool (*MakeProgram)(Shader& shader, const Shader::BindingSet& bindings);
@ -125,6 +155,7 @@ public:
~Context();
void render(Batch& batch);
void enableStereo(bool enable = true);
bool isStereo();
void setStereoProjections(const mat4 eyeProjections[2]);
@ -137,6 +168,16 @@ public:
// It's here for convenience to easily capture a snapshot
void downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage);
// Reporting stats of the context
void getStats(ContextStats& stats) const;
static uint32_t getBufferGPUCount();
static Size getBufferGPUMemoryUsage();
static uint32_t getTextureGPUCount();
static Size getTextureGPUMemoryUsage();
protected:
Context(const Context& context);
@ -153,6 +194,23 @@ protected:
static std::once_flag _initialized;
friend class Shader;
// These should only be accessed by the Backend; they are NOT public calls
static void incrementBufferGPUCount();
static void decrementBufferGPUCount();
static void updateBufferGPUMemoryUsage(Size prevObjectSize, Size newObjectSize);
static void incrementTextureGPUCount();
static void decrementTextureGPUCount();
static void updateTextureGPUMemoryUsage(Size prevObjectSize, Size newObjectSize);
// Buffer and Texture Counters
static std::atomic<uint32_t> _bufferGPUCount;
static std::atomic<Size> _bufferGPUMemoryUsage;
static std::atomic<uint32_t> _textureGPUCount;
static std::atomic<Size> _textureGPUMemoryUsage;
friend class Backend;
};
typedef std::shared_ptr<Context> ContextPointer;
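Editor's note: a rough sketch of how these stats might be consumed from the application side, assuming the counters accumulate across frames (which is how the draw-call code in GLBackend increments them); the snapshot/delta scheme and the qDebug output are illustrative, not from this commit.
#include <QDebug>
#include <gpu/Context.h>
// Illustrative: snapshot the cumulative stats each frame and report the delta.
void reportFrameStats(const gpu::ContextPointer& context, gpu::ContextStats& previousFrame) {
    gpu::ContextStats current;
    context->getStats(current);
    qDebug() << "draw calls this frame:" << (current._DSNumDrawcalls - previousFrame._DSNumDrawcalls)
             << "triangles:" << (current._DSNumTriangles - previousFrame._DSNumTriangles);
    previousFrame = current;
}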

View file

@ -324,7 +324,11 @@ void GLBackend::do_draw(Batch& batch, size_t paramOffset) {
uint32 numVertices = batch._params[paramOffset + 1]._uint;
uint32 startVertex = batch._params[paramOffset + 0]._uint;
glDrawArrays(mode, startVertex, numVertices);
(void) CHECK_GL_ERROR();
_stats._DSNumTriangles += numVertices / 3;
_stats._DSNumDrawcalls++;
_stats._DSNumAPIDrawcalls++;
(void)CHECK_GL_ERROR();
}
void GLBackend::do_drawIndexed(Batch& batch, size_t paramOffset) {
@ -339,6 +343,10 @@ void GLBackend::do_drawIndexed(Batch& batch, size_t paramOffset) {
GLvoid* indexBufferByteOffset = reinterpret_cast<GLvoid*>(startIndex * typeByteSize + _input._indexBufferOffset);
glDrawElements(mode, numIndices, glType, indexBufferByteOffset);
_stats._DSNumTriangles += numIndices / 3;
_stats._DSNumDrawcalls++;
_stats._DSNumAPIDrawcalls++;
(void) CHECK_GL_ERROR();
}
@ -350,6 +358,10 @@ void GLBackend::do_drawInstanced(Batch& batch, size_t paramOffset) {
uint32 startVertex = batch._params[paramOffset + 1]._uint;
glDrawArraysInstancedARB(mode, startVertex, numVertices, numInstances);
_stats._DSNumTriangles += (numInstances * numVertices) / 3;
_stats._DSNumDrawcalls += numInstances;
_stats._DSNumAPIDrawcalls++;
(void) CHECK_GL_ERROR();
}
@ -372,6 +384,10 @@ void GLBackend::do_drawIndexedInstanced(Batch& batch, size_t paramOffset) {
glDrawElementsInstanced(mode, numIndices, glType, indexBufferByteOffset, numInstances);
Q_UNUSED(startInstance);
#endif
_stats._DSNumTriangles += (numInstances * numIndices) / 3;
_stats._DSNumDrawcalls += numInstances;
_stats._DSNumAPIDrawcalls++;
(void)CHECK_GL_ERROR();
}
@ -382,6 +398,9 @@ void GLBackend::do_multiDrawIndirect(Batch& batch, size_t paramOffset) {
GLenum mode = _primitiveToGLmode[(Primitive)batch._params[paramOffset + 1]._uint];
glMultiDrawArraysIndirect(mode, reinterpret_cast<GLvoid*>(_input._indirectBufferOffset), commandCount, (GLsizei)_input._indirectBufferStride);
_stats._DSNumDrawcalls += commandCount;
_stats._DSNumAPIDrawcalls++;
#else
// FIXME implement the slow path
#endif
@ -396,6 +415,8 @@ void GLBackend::do_multiDrawIndexedIndirect(Batch& batch, size_t paramOffset) {
GLenum indexType = _elementTypeToGLType[_input._indexBufferType];
glMultiDrawElementsIndirect(mode, indexType, reinterpret_cast<GLvoid*>(_input._indirectBufferOffset), commandCount, (GLsizei)_input._indirectBufferStride);
_stats._DSNumDrawcalls += commandCount;
_stats._DSNumAPIDrawcalls++;
#else
// FIXME implement the slow path
#endif

View file

@ -67,6 +67,8 @@ public:
GLBuffer();
~GLBuffer();
void setSize(GLuint size);
};
static GLBuffer* syncGPUObject(const Buffer& buffer);
static GLuint getBufferID(const Buffer& buffer);
@ -77,10 +79,15 @@ public:
Stamp _contentStamp;
GLuint _texture;
GLenum _target;
GLuint _size;
GLTexture();
~GLTexture();
void setSize(GLuint size);
GLuint size() const { return _size; }
private:
GLuint _size;
};
static GLTexture* syncGPUObject(const Texture& texture);
static GLuint getTextureID(const TexturePointer& texture, bool sync = true);
@ -151,7 +158,6 @@ public:
~GLState();
// The state commands to reset to default,
// WARNING depending on the order of the State::Field enum
static const Commands _resetStateCommands;
friend class GLBackend;
@ -230,26 +236,11 @@ public:
void do_setStateBlend(State::BlendFunction blendFunction);
void do_setStateColorWriteMask(uint32 mask);
// Reporting stats of the context
class Stats {
public:
int _ISNumFormatChanges = 0;
int _ISNumInputBufferChanges = 0;
int _ISNumIndexBufferChanges = 0;
Stats() {}
Stats(const Stats& stats) = default;
};
void getStats(Stats& stats) const { stats = _stats; }
protected:
void renderPassTransfer(Batch& batch);
void renderPassDraw(Batch& batch);
Stats _stats;
// Draw Stage
void do_draw(Batch& batch, size_t paramOffset);
void do_drawIndexed(Batch& batch, size_t paramOffset);

View file

@ -16,12 +16,21 @@ GLBackend::GLBuffer::GLBuffer() :
_stamp(0),
_buffer(0),
_size(0)
{}
{
Backend::incrementBufferGPUCount();
}
GLBackend::GLBuffer::~GLBuffer() {
if (_buffer != 0) {
glDeleteBuffers(1, &_buffer);
}
Backend::updateBufferGPUMemoryUsage(_size, 0);
Backend::decrementBufferGPUCount();
}
void GLBackend::GLBuffer::setSize(GLuint size) {
Backend::updateBufferGPUMemoryUsage(_size, size);
_size = size;
}
GLBackend::GLBuffer* GLBackend::syncGPUObject(const Buffer& buffer) {
@ -46,7 +55,7 @@ GLBackend::GLBuffer* GLBackend::syncGPUObject(const Buffer& buffer) {
glBufferData(GL_ARRAY_BUFFER, buffer.getSysmem().getSize(), buffer.getSysmem().readData(), GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
object->_stamp = buffer.getSysmem().getStamp();
object->_size = (GLuint)buffer.getSysmem().getSize();
object->setSize((GLuint)buffer.getSysmem().getSize());
//}
(void) CHECK_GL_ERROR();
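Editor's note: one detail worth calling out in setSize() above is the ordering — the counter update is reported while _size still holds the previous value, and only then is _size overwritten; swapping those two lines would corrupt the running total. A generic sketch of the same pattern with hypothetical names, not code from this commit:
#include <atomic>
#include <cstddef>
// Hypothetical tracked-size holder showing the report-then-assign ordering.
class TrackedSize {
public:
    void setSize(std::size_t newSize) {
        reportDelta(_size, newSize);  // _size still holds the old value here
        _size = newSize;              // only now is the cached size overwritten
    }
    static std::size_t total() { return _total.load(); }
private:
    static void reportDelta(std::size_t prev, std::size_t next) {
        if (next > prev) {
            _total.fetch_add(next - prev);
        } else if (prev > next) {
            _total.fetch_sub(prev - next);
        }
    }
    std::size_t _size { 0 };
    static std::atomic<std::size_t> _total;
};
std::atomic<std::size_t> TrackedSize::_total { 0 };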

View file

@ -251,6 +251,9 @@ void GLBackend::do_setResourceTexture(Batch& batch, size_t paramOffset) {
return;
}
// One more True texture bound
_stats._RSNumTextureBounded++;
// Always make sure the GLObject is in sync
GLTexture* object = GLBackend::syncGPUObject(*resourceTexture);
if (object) {

View file

@ -35,6 +35,7 @@ const GLBackend::GLState::Commands makeResetStateCommands();
const GLBackend::GLState::Commands GLBackend::GLState::_resetStateCommands = makeResetStateCommands();
// NOTE: This must stay in sync with the ordering of the State::Field enum
const GLBackend::GLState::Commands makeResetStateCommands() {
// Since State::DEFAULT is a static defined in another .cpp the initialisation order is random
// and we have a 50/50 chance that State::DEFAULT is not yet initialized.
@ -69,9 +70,9 @@ const GLBackend::GLState::Commands makeResetStateCommands() {
CommandPointer(stencilCommand),
CommandPointer(stencilCommand),
std::make_shared<Command1B>(&GLBackend::do_setStateAlphaToCoverageEnable, DEFAULT.alphaToCoverageEnable),
std::make_shared<Command1U>(&GLBackend::do_setStateSampleMask, DEFAULT.sampleMask),
std::make_shared<Command1B>(&GLBackend::do_setStateAlphaToCoverageEnable, DEFAULT.alphaToCoverageEnable),
std::make_shared<CommandBlend>(&GLBackend::do_setStateBlend, DEFAULT.blendFunction),
@ -609,6 +610,8 @@ void GLBackend::do_setStateDepthBias(Vec2 bias) {
glDisable(GL_POLYGON_OFFSET_LINE);
glDisable(GL_POLYGON_OFFSET_POINT);
}
(void) CHECK_GL_ERROR();
_pipeline._stateCache.depthBias = bias.x;
_pipeline._stateCache.depthBiasSlopeScale = bias.y;
}
@ -689,6 +692,7 @@ void GLBackend::do_setStateAlphaToCoverageEnable(bool enable) {
glDisable(GL_SAMPLE_ALPHA_TO_COVERAGE);
}
(void) CHECK_GL_ERROR();
_pipeline._stateCache.alphaToCoverageEnable = enable;
}
}
@ -702,6 +706,7 @@ void GLBackend::do_setStateSampleMask(uint32 mask) {
glEnable(GL_SAMPLE_MASK);
glSampleMaski(0, mask);
}
(void) CHECK_GL_ERROR();
#endif
_pipeline._stateCache.sampleMask = mask;
}
@ -742,10 +747,10 @@ void GLBackend::do_setStateBlend(State::BlendFunction function) {
glBlendFuncSeparate(BLEND_ARGS[function.getSourceColor()], BLEND_ARGS[function.getDestinationColor()],
BLEND_ARGS[function.getSourceAlpha()], BLEND_ARGS[function.getDestinationAlpha()]);
(void) CHECK_GL_ERROR();
} else {
glDisable(GL_BLEND);
}
(void) CHECK_GL_ERROR();
_pipeline._stateCache.blendFunction = function;
}
@ -757,6 +762,7 @@ void GLBackend::do_setStateColorWriteMask(uint32 mask) {
mask & State::ColorMask::WRITE_GREEN,
mask & State::ColorMask::WRITE_BLUE,
mask & State::ColorMask::WRITE_ALPHA );
(void) CHECK_GL_ERROR();
_pipeline._stateCache.colorWriteMask = mask;
}
@ -764,7 +770,6 @@ void GLBackend::do_setStateColorWriteMask(uint32 mask) {
void GLBackend::do_setStateBlendFactor(Batch& batch, size_t paramOffset) {
Vec4 factor(batch._params[paramOffset + 0]._float,
batch._params[paramOffset + 1]._float,
batch._params[paramOffset + 2]._float,

View file

@ -19,12 +19,21 @@ GLBackend::GLTexture::GLTexture() :
_texture(0),
_target(GL_TEXTURE_2D),
_size(0)
{}
{
Backend::incrementTextureGPUCount();
}
GLBackend::GLTexture::~GLTexture() {
if (_texture != 0) {
glDeleteTextures(1, &_texture);
}
Backend::updateTextureGPUMemoryUsage(_size, 0);
Backend::decrementTextureGPUCount();
}
void GLBackend::GLTexture::setSize(GLuint size) {
Backend::updateTextureGPUMemoryUsage(_size, size);
_size = size;
}
class GLTexelFormat {
@ -216,6 +225,12 @@ public:
}
break;
case gpu::R11G11B10:
texel.format = GL_RGB;
// the type should be float
texel.internalFormat = GL_R11F_G11F_B10F;
break;
case gpu::DEPTH:
texel.format = GL_DEPTH_COMPONENT; // It's depth component to load it
texel.internalFormat = GL_DEPTH_COMPONENT;
@ -293,11 +308,6 @@ public:
case gpu::SRGBA:
texel.internalFormat = GL_SRGB; // standard 2.2 gamma correction color
break;
case gpu::R11G11B10: {
// the type should be float
texel.internalFormat = GL_R11F_G11F_B10F;
break;
}
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
@ -427,8 +437,8 @@ GLBackend::GLTexture* GLBackend::syncGPUObject(const Texture& texture) {
if (needUpdate) {
if (texture.isStoredMipFaceAvailable(0)) {
Texture::PixelsPointer mip = texture.accessStoredMipFace(0);
const GLvoid* bytes = mip->_sysmem.read<Byte>();
Element srcFormat = mip->_format;
const GLvoid* bytes = mip->readData();
Element srcFormat = mip->getFormat();
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(texture.getTexelFormat(), srcFormat);
@ -458,8 +468,8 @@ GLBackend::GLTexture* GLBackend::syncGPUObject(const Texture& texture) {
if (texture.isStoredMipFaceAvailable(0)) {
Texture::PixelsPointer mip = texture.accessStoredMipFace(0);
bytes = mip->_sysmem.read<Byte>();
srcFormat = mip->_format;
bytes = mip->readData();
srcFormat = mip->getFormat();
object->_contentStamp = texture.getDataStamp();
}
@ -483,7 +493,7 @@ GLBackend::GLTexture* GLBackend::syncGPUObject(const Texture& texture) {
object->_storageStamp = texture.getStamp();
object->_contentStamp = texture.getDataStamp();
object->_size = (GLuint)texture.getSize();
object->setSize((GLuint)texture.getSize());
}
glBindTexture(GL_TEXTURE_2D, boundTex);
@ -507,11 +517,11 @@ GLBackend::GLTexture* GLBackend::syncGPUObject(const Texture& texture) {
for (int f = 0; f < NUM_FACES; f++) {
if (texture.isStoredMipFaceAvailable(0, f)) {
Texture::PixelsPointer mipFace = texture.accessStoredMipFace(0, f);
Element srcFormat = mipFace->_format;
Element srcFormat = mipFace->getFormat();
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(texture.getTexelFormat(), srcFormat);
glTexSubImage2D(FACE_LAYOUT[f], 0, texelFormat.internalFormat, texture.getWidth(), texture.getWidth(), 0,
texelFormat.format, texelFormat.type, (GLvoid*) (mipFace->_sysmem.read<Byte>()));
texelFormat.format, texelFormat.type, (GLvoid*) (mipFace->readData()));
// At this point the mip pixels have been loaded, we can notify
texture.notifyMipFaceGPULoaded(0, f);
@ -536,11 +546,11 @@ GLBackend::GLTexture* GLBackend::syncGPUObject(const Texture& texture) {
for (int f = 0; f < NUM_FACES; f++) {
if (texture.isStoredMipFaceAvailable(0, f)) {
Texture::PixelsPointer mipFace = texture.accessStoredMipFace(0, f);
Element srcFormat = mipFace->_format;
Element srcFormat = mipFace->getFormat();
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(texture.getTexelFormat(), srcFormat);
glTexImage2D(FACE_LAYOUT[f], 0, texelFormat.internalFormat, texture.getWidth(), texture.getWidth(), 0,
texelFormat.format, texelFormat.type, (GLvoid*) (mipFace->_sysmem.read<Byte>()));
texelFormat.format, texelFormat.type, (GLvoid*) (mipFace->readData()));
// At this point the mip pixels have been loaded, we can notify
texture.notifyMipFaceGPULoaded(0, f);
@ -561,7 +571,7 @@ GLBackend::GLTexture* GLBackend::syncGPUObject(const Texture& texture) {
object->_storageStamp = texture.getStamp();
object->_contentStamp = texture.getDataStamp();
object->_size = (GLuint)texture.getSize();
object->setSize((GLuint)texture.getSize());
}
glBindTexture(GL_TEXTURE_CUBE_MAP, boundTex);

View file

@ -16,6 +16,8 @@
#include <NumericalConstants.h>
#include <QDebug>
#include "Context.h"
using namespace gpu;
class AllocationDebugger {
@ -232,19 +234,55 @@ Resource::Size Resource::Sysmem::append(Size size, const Byte* bytes) {
return 0;
}
std::atomic<uint32_t> Buffer::_bufferCPUCount{ 0 };
std::atomic<Buffer::Size> Buffer::_bufferCPUMemoryUsage{ 0 };
void Buffer::updateBufferCPUMemoryUsage(Size prevObjectSize, Size newObjectSize) {
if (prevObjectSize == newObjectSize) {
return;
}
if (prevObjectSize > newObjectSize) {
_bufferCPUMemoryUsage.fetch_sub(prevObjectSize - newObjectSize);
} else {
_bufferCPUMemoryUsage.fetch_add(newObjectSize - prevObjectSize);
}
}
uint32_t Buffer::getBufferCPUCount() {
return _bufferCPUCount.load();
}
Buffer::Size Buffer::getBufferCPUMemoryUsage() {
return _bufferCPUMemoryUsage.load();
}
uint32_t Buffer::getBufferGPUCount() {
return Context::getBufferGPUCount();
}
Buffer::Size Buffer::getBufferGPUMemoryUsage() {
return Context::getBufferGPUMemoryUsage();
}
Buffer::Buffer() :
Resource(),
_sysmem(new Sysmem()) {
_bufferCPUCount++;
}
Buffer::Buffer(Size size, const Byte* bytes) :
Resource(),
_sysmem(new Sysmem(size, bytes)) {
_bufferCPUCount++;
Buffer::updateBufferCPUMemoryUsage(0, _sysmem->getSize());
}
Buffer::Buffer(const Buffer& buf) :
Resource(),
_sysmem(new Sysmem(buf.getSysmem())) {
_bufferCPUCount++;
Buffer::updateBufferCPUMemoryUsage(0, _sysmem->getSize());
}
Buffer& Buffer::operator=(const Buffer& buf) {
@ -253,18 +291,27 @@ Buffer& Buffer::operator=(const Buffer& buf) {
}
Buffer::~Buffer() {
_bufferCPUCount--;
if (_sysmem) {
Buffer::updateBufferCPUMemoryUsage(_sysmem->getSize(), 0);
delete _sysmem;
_sysmem = NULL;
}
}
Buffer::Size Buffer::resize(Size size) {
return editSysmem().resize(size);
auto prevSize = editSysmem().getSize();
auto newSize = editSysmem().resize(size);
Buffer::updateBufferCPUMemoryUsage(prevSize, newSize);
return newSize;
}
Buffer::Size Buffer::setData(Size size, const Byte* data) {
return editSysmem().setData(size, data);
auto prevSize = editSysmem().getSize();
auto newSize = editSysmem().setData(size, data);
Buffer::updateBufferCPUMemoryUsage(prevSize, newSize);
return newSize;
}
Buffer::Size Buffer::setSubData(Size offset, Size size, const Byte* data) {
@ -272,6 +319,9 @@ Buffer::Size Buffer::setSubData(Size offset, Size size, const Byte* data) {
}
Buffer::Size Buffer::append(Size size, const Byte* data) {
return editSysmem().append( size, data);
auto prevSize = editSysmem().getSize();
auto newSize = editSysmem().append( size, data);
Buffer::updateBufferCPUMemoryUsage(prevSize, newSize);
return newSize;
}
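Editor's note: with the static getters added above, system-memory and GPU-side buffer usage can be compared directly. A minimal sketch assuming <gpu/Resource.h> as the include path; the helper itself is hypothetical.
#include <QDebug>
#include <gpu/Resource.h>
// Illustrative readout of CPU-side vs GPU-side buffer usage.
void logBufferMemory() {
    qDebug() << "CPU buffers:" << gpu::Buffer::getBufferCPUCount()
             << "using" << (quint64) gpu::Buffer::getBufferCPUMemoryUsage() << "bytes";
    qDebug() << "GPU buffers:" << gpu::Buffer::getBufferGPUCount()
             << "using" << (quint64) gpu::Buffer::getBufferGPUMemoryUsage() << "bytes";
}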

View file

@ -16,6 +16,7 @@
#include "Format.h"
#include <vector>
#include <atomic>
#include <memory>
#ifdef _DEBUG
@ -109,7 +110,15 @@ protected:
};
class Buffer : public Resource {
static std::atomic<uint32_t> _bufferCPUCount;
static std::atomic<Size> _bufferCPUMemoryUsage;
static void updateBufferCPUMemoryUsage(Size prevObjectSize, Size newObjectSize);
public:
static uint32_t getBufferCPUCount();
static Size getBufferCPUMemoryUsage();
static uint32_t getBufferGPUCount();
static Size getBufferGPUMemoryUsage();
Buffer();
Buffer(Size size, const Byte* bytes);

View file

@ -345,6 +345,7 @@ public:
uint8 getColorWriteMask() const { return _values.colorWriteMask; }
// All the possible fields
// NOTE: If you change this, you must update GLBackend::GLState::_resetStateCommands
enum Field {
FILL_MODE,
CULL_MODE,
@ -364,6 +365,7 @@ public:
STENCIL_TEST_BACK,
SAMPLE_MASK,
ALPHA_TO_COVERAGE_ENABLE,
BLEND_FUNCTION,

View file

@ -12,20 +12,77 @@
#include "Texture.h"
#include <glm/gtc/constants.hpp>
#include <QDebug>
#include "GPULogging.h"
#include "Context.h"
using namespace gpu;
std::atomic<uint32_t> Texture::_textureCPUCount{ 0 };
std::atomic<Texture::Size> Texture::_textureCPUMemoryUsage{ 0 };
void Texture::updateTextureCPUMemoryUsage(Size prevObjectSize, Size newObjectSize) {
if (prevObjectSize == newObjectSize) {
return;
}
if (prevObjectSize > newObjectSize) {
_textureCPUMemoryUsage.fetch_sub(prevObjectSize - newObjectSize);
} else {
_textureCPUMemoryUsage.fetch_add(newObjectSize - prevObjectSize);
}
}
uint32_t Texture::getTextureCPUCount() {
return _textureCPUCount.load();
}
Texture::Size Texture::getTextureCPUMemoryUsage() {
return _textureCPUMemoryUsage.load();
}
uint32_t Texture::getTextureGPUCount() {
return Context::getTextureGPUCount();
}
Texture::Size Texture::getTextureGPUMemoryUsage() {
return Context::getTextureGPUMemoryUsage();
}
uint8 Texture::NUM_FACES_PER_TYPE[NUM_TYPES] = {1, 1, 1, 6};
Texture::Pixels::Pixels(const Element& format, Size size, const Byte* bytes) :
_sysmem(size, bytes),
_format(format),
_sysmem(size, bytes),
_isGPULoaded(false) {
Texture::updateTextureCPUMemoryUsage(0, _sysmem.getSize());
}
Texture::Pixels::~Pixels() {
Texture::updateTextureCPUMemoryUsage(_sysmem.getSize(), 0);
}
Texture::Size Texture::Pixels::resize(Size pSize) {
auto prevSize = _sysmem.getSize();
auto newSize = _sysmem.resize(pSize);
Texture::updateTextureCPUMemoryUsage(prevSize, newSize);
return newSize;
}
Texture::Size Texture::Pixels::setData(const Element& format, Size size, const Byte* bytes ) {
_format = format;
auto prevSize = _sysmem.getSize();
auto newSize = _sysmem.setData(size, bytes);
Texture::updateTextureCPUMemoryUsage(prevSize, newSize);
_isGPULoaded = false;
return newSize;
}
void Texture::Pixels::notifyGPULoaded() {
_isGPULoaded = true;
auto prevSize = _sysmem.getSize();
auto newSize = _sysmem.resize(0);
Texture::updateTextureCPUMemoryUsage(prevSize, newSize);
}
void Texture::Storage::assignTexture(Texture* texture) {
@ -59,15 +116,15 @@ const Texture::PixelsPointer Texture::Storage::getMipFace(uint16 level, uint8 fa
void Texture::Storage::notifyMipFaceGPULoaded(uint16 level, uint8 face) const {
PixelsPointer mipFace = getMipFace(level, face);
if (mipFace && (_type != TEX_CUBE)) {
mipFace->_isGPULoaded = true;
mipFace->_sysmem.resize(0);
// Free the mips
if (mipFace) {
mipFace->notifyGPULoaded();
}
}
bool Texture::Storage::isMipAvailable(uint16 level, uint8 face) const {
PixelsPointer mipFace = getMipFace(level, face);
return (mipFace && mipFace->_sysmem.getSize());
return (mipFace && mipFace->getSize());
}
bool Texture::Storage::allocateMip(uint16 level) {
@ -103,9 +160,7 @@ bool Texture::Storage::assignMipData(uint16 level, const Element& format, Size s
auto faceBytes = bytes;
Size allocated = 0;
for (auto& face : mip) {
face->_format = format;
allocated += face->_sysmem.setData(sizePerFace, faceBytes);
face->_isGPULoaded = false;
allocated += face->setData(format, sizePerFace, faceBytes);
faceBytes += sizePerFace;
}
@ -122,9 +177,7 @@ bool Texture::Storage::assignMipFaceData(uint16 level, const Element& format, Si
Size allocated = 0;
if (face < mip.size()) {
auto mipFace = mip[face];
mipFace->_format = format;
allocated += mipFace->_sysmem.setData(size, bytes);
mipFace->_isGPULoaded = false;
allocated += mipFace->setData(format, size, bytes);
bumpStamp();
}
@ -171,10 +224,12 @@ Texture* Texture::createFromStorage(Storage* storage) {
Texture::Texture():
Resource()
{
_textureCPUCount++;
}
Texture::~Texture()
{
_textureCPUCount--;
}
Texture::Size Texture::resize(Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices) {
@ -230,7 +285,7 @@ Texture::Size Texture::resize(Type type, const Element& texelFormat, uint16 widt
}
// Here the Texture has been fully defined from the gpu point of view (size and format)
_defined = true;
_defined = true;
} else {
_stamp++;
}
@ -292,7 +347,7 @@ bool Texture::assignStoredMip(uint16 level, const Element& format, Size size, co
}
}
// Then check that the mem buffer passed makes sense with its format
// Then check that the mem texture passed makes sense with its format
Size expectedSize = evalStoredMipSize(level, format);
if (size == expectedSize) {
_storage->assignMipData(level, format, size, bytes);
@ -323,7 +378,7 @@ bool Texture::assignStoredMipFace(uint16 level, const Element& format, Size size
}
}
// Then check that the mem buffer passed makes sense with its format
// Then check that the mem texture passed makes sense with its format
Size expectedSize = evalStoredMipFaceSize(level, format);
if (size == expectedSize) {
_storage->assignMipFaceData(level, format, size, bytes, face);
@ -364,7 +419,7 @@ uint16 Texture::autoGenerateMips(uint16 maxMip) {
uint16 Texture::getStoredMipWidth(uint16 level) const {
PixelsPointer mipFace = accessStoredMipFace(level);
if (mipFace && mipFace->_sysmem.getSize()) {
if (mipFace && mipFace->getSize()) {
return evalMipWidth(level);
}
return 0;
@ -372,7 +427,7 @@ uint16 Texture::getStoredMipWidth(uint16 level) const {
uint16 Texture::getStoredMipHeight(uint16 level) const {
PixelsPointer mip = accessStoredMipFace(level);
if (mip && mip->_sysmem.getSize()) {
if (mip && mip->getSize()) {
return evalMipHeight(level);
}
return 0;
@ -380,7 +435,7 @@ uint16 Texture::getStoredMipHeight(uint16 level) const {
uint16 Texture::getStoredMipDepth(uint16 level) const {
PixelsPointer mipFace = accessStoredMipFace(level);
if (mipFace && mipFace->_sysmem.getSize()) {
if (mipFace && mipFace->getSize()) {
return evalMipDepth(level);
}
return 0;
@ -388,7 +443,7 @@ uint16 Texture::getStoredMipDepth(uint16 level) const {
uint32 Texture::getStoredMipNumTexels(uint16 level) const {
PixelsPointer mipFace = accessStoredMipFace(level);
if (mipFace && mipFace->_sysmem.getSize()) {
if (mipFace && mipFace->getSize()) {
return evalMipWidth(level) * evalMipHeight(level) * evalMipDepth(level);
}
return 0;
@ -396,7 +451,7 @@ uint32 Texture::getStoredMipNumTexels(uint16 level) const {
uint32 Texture::getStoredMipSize(uint16 level) const {
PixelsPointer mipFace = accessStoredMipFace(level);
if (mipFace && mipFace->_sysmem.getSize()) {
if (mipFace && mipFace->getSize()) {
return evalMipWidth(level) * evalMipHeight(level) * evalMipDepth(level) * getTexelFormat().getSize();
}
return 0;
@ -642,8 +697,8 @@ bool sphericalHarmonicsFromTexture(const gpu::Texture& cubeTexture, std::vector<
// for each face of cube texture
for(int face=0; face < gpu::Texture::NUM_CUBE_FACES; face++) {
auto numComponents = cubeTexture.accessStoredMipFace(0,face)->_format.getScalarCount();
auto data = cubeTexture.accessStoredMipFace(0,face)->_sysmem.readData();
auto numComponents = cubeTexture.accessStoredMipFace(0,face)->getFormat().getScalarCount();
auto data = cubeTexture.accessStoredMipFace(0,face)->readData();
if (data == nullptr) {
continue;
}

View file

@ -138,7 +138,14 @@ protected:
};
class Texture : public Resource {
static std::atomic<uint32_t> _textureCPUCount;
static std::atomic<Size> _textureCPUMemoryUsage;
static void updateTextureCPUMemoryUsage(Size prevObjectSize, Size newObjectSize);
public:
static uint32_t getTextureCPUCount();
static Size getTextureCPUMemoryUsage();
static uint32_t getTextureGPUCount();
static Size getTextureGPUMemoryUsage();
class Usage {
public:
@ -194,9 +201,21 @@ public:
Pixels(const Element& format, Size size, const Byte* bytes);
~Pixels();
Sysmem _sysmem;
const Byte* readData() const { return _sysmem.readData(); }
Size getSize() const { return _sysmem.getSize(); }
Size resize(Size pSize);
Size setData(const Element& format, Size size, const Byte* bytes );
const Element& getFormat() const { return _format; }
void notifyGPULoaded();
protected:
Element _format;
Sysmem _sysmem;
bool _isGPULoaded;
friend class Texture;
};
typedef std::shared_ptr< Pixels > PixelsPointer;
@ -448,7 +467,7 @@ typedef std::shared_ptr<Texture> TexturePointer;
typedef std::vector< TexturePointer > Textures;
// TODO: For now TextureView works with Buffer as a place holder for the Texture.
// TODO: For now TextureView works with Texture as a place holder for the Texture.
// The overall logic should be about the same except that the Texture will be a real GL Texture under the hood
class TextureView {
public:

View file

@ -1,53 +1,109 @@
//
// ModelCache.cpp
// interface/src/renderer
// libraries/model-networking
//
// Created by Andrzej Kapolka on 6/21/13.
// Copyright 2013 High Fidelity, Inc.
// Created by Zach Pomerantz on 3/15/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "ModelCache.h"
#include <FSTReader.h>
#include "FBXReader.h"
#include "OBJReader.h"
#include <cmath>
#include <gpu/Batch.h>
#include <gpu/Stream.h>
#include <QNetworkReply>
#include <QThreadPool>
#include <FSTReader.h>
#include <NumericalConstants.h>
#include "TextureCache.h"
#include "ModelNetworkingLogging.h"
#include "model/TextureMap.h"
class GeometryReader;
//#define WANT_DEBUG
class GeometryExtra {
public:
const QVariantHash& mapping;
const QUrl& textureBaseUrl;
};
ModelCache::ModelCache()
{
const qint64 GEOMETRY_DEFAULT_UNUSED_MAX_SIZE = DEFAULT_UNUSED_MAX_SIZE;
setUnusedResourceCacheSize(GEOMETRY_DEFAULT_UNUSED_MAX_SIZE);
class GeometryMappingResource : public GeometryResource {
Q_OBJECT
public:
GeometryMappingResource(const QUrl& url) : GeometryResource(url) {};
virtual void downloadFinished(const QByteArray& data) override;
private slots:
void onGeometryMappingLoaded(bool success);
private:
GeometryResource::Pointer _geometryResource;
};
void GeometryMappingResource::downloadFinished(const QByteArray& data) {
auto mapping = FSTReader::readMapping(data);
QString filename = mapping.value("filename").toString();
if (filename.isNull()) {
qCDebug(modelnetworking) << "Mapping file" << _url << "has no \"filename\" field";
finishedLoading(false);
} else {
QUrl url = _url.resolved(filename);
QUrl textureBaseUrl;
QString texdir = mapping.value("texdir").toString();
if (!texdir.isNull()) {
if (!texdir.endsWith('/')) {
texdir += '/';
}
textureBaseUrl = _url.resolved(texdir);
}
auto modelCache = DependencyManager::get<ModelCache>();
GeometryExtra extra{ mapping, textureBaseUrl };
// Get the raw GeometryResource, not the wrapped NetworkGeometry
_geometryResource = modelCache->getResource(url, QUrl(), true, &extra).staticCast<GeometryResource>();
if (_geometryResource->isLoaded()) {
onGeometryMappingLoaded(!_geometryResource->getURL().isEmpty());
} else {
connect(_geometryResource.data(), &Resource::finished, this, &GeometryMappingResource::onGeometryMappingLoaded);
}
// Avoid caching nested resources - their references will be held by the parent
_geometryResource->_isCacheable = false;
}
}
ModelCache::~ModelCache() {
void GeometryMappingResource::onGeometryMappingLoaded(bool success) {
if (success) {
_geometry = _geometryResource->_geometry;
_shapes = _geometryResource->_shapes;
_meshes = _geometryResource->_meshes;
_materials = _geometryResource->_materials;
}
finishedLoading(success);
}
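Editor's note: the mapping consumed by downloadFinished() above is a flat key/value .fst file, and only the "filename" and "texdir" entries matter to this code path. An illustrative example with made-up values:
filename = head-and-body.fbx
texdir = textures/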
QSharedPointer<Resource> ModelCache::createResource(const QUrl& url, const QSharedPointer<Resource>& fallback,
bool delayLoad, const void* extra) {
// NetworkGeometry is no longer a subclass of Resource, but requires this method because it is pure virtual.
assert(false);
return QSharedPointer<Resource>();
}
class GeometryReader : public QRunnable {
public:
GeometryReader(QWeakPointer<Resource>& resource, const QUrl& url, const QVariantHash& mapping,
const QByteArray& data) :
_resource(resource), _url(url), _mapping(mapping), _data(data) {}
virtual ~GeometryReader() = default;
virtual void run() override;
GeometryReader::GeometryReader(const QUrl& url, const QByteArray& data, const QVariantHash& mapping) :
_url(url),
_data(data),
_mapping(mapping) {
}
private:
QWeakPointer<Resource> _resource;
QUrl _url;
QVariantHash _mapping;
QByteArray _data;
};
void GeometryReader::run() {
auto originalPriority = QThread::currentThread()->priority();
@ -55,458 +111,389 @@ void GeometryReader::run() {
originalPriority = QThread::NormalPriority;
}
QThread::currentThread()->setPriority(QThread::LowPriority);
// Ensure the resource is still being requested
auto resource = _resource.toStrongRef();
if (!resource) {
qCWarning(modelnetworking) << "Abandoning load of" << _url << "; could not get strong ref";
return;
}
try {
if (_data.isEmpty()) {
throw QString("Reply is NULL ?!");
throw QString("reply is NULL");
}
QString urlname = _url.path().toLower();
bool urlValid = true;
urlValid &= !urlname.isEmpty();
urlValid &= !_url.path().isEmpty();
urlValid &= _url.path().toLower().endsWith(".fbx") || _url.path().toLower().endsWith(".obj");
if (urlValid) {
// Let's read the binaries from the network
FBXGeometry* fbxgeo = nullptr;
QString urlname = _url.path().toLower();
if (!urlname.isEmpty() && !_url.path().isEmpty() &&
(_url.path().toLower().endsWith(".fbx") || _url.path().toLower().endsWith(".obj"))) {
FBXGeometry* fbxGeometry = nullptr;
if (_url.path().toLower().endsWith(".fbx")) {
const bool grabLightmaps = true;
const float lightmapLevel = 1.0f;
fbxgeo = readFBX(_data, _mapping, _url.path(), grabLightmaps, lightmapLevel);
if (fbxgeo->meshes.size() == 0 && fbxgeo->joints.size() == 0) {
// empty fbx geometry, indicates error
fbxGeometry = readFBX(_data, _mapping, _url.path());
if (fbxGeometry->meshes.size() == 0 && fbxGeometry->joints.size() == 0) {
throw QString("empty geometry, possibly due to an unsupported FBX version");
}
} else if (_url.path().toLower().endsWith(".obj")) {
fbxgeo = OBJReader().readOBJ(_data, _mapping, _url);
fbxGeometry = OBJReader().readOBJ(_data, _mapping, _url);
} else {
QString errorStr("unsupported format");
throw errorStr;
throw QString("unsupported format");
}
emit onSuccess(fbxgeo);
QMetaObject::invokeMethod(resource.data(), "setGeometryDefinition",
Q_ARG(void*, fbxGeometry));
} else {
throw QString("url is invalid");
}
} catch (const QString& error) {
qCDebug(modelnetworking) << "Error reading " << _url << ": " << error;
emit onError(NetworkGeometry::ModelParseError, error);
QMetaObject::invokeMethod(resource.data(), "finishedLoading", Q_ARG(bool, false));
}
QThread::currentThread()->setPriority(originalPriority);
}
NetworkGeometry::NetworkGeometry(const QUrl& url, bool delayLoad, const QVariantHash& mapping, const QUrl& textureBaseUrl) :
_url(url),
_mapping(mapping),
_textureBaseUrl(textureBaseUrl.isValid() ? textureBaseUrl : url) {
class GeometryDefinitionResource : public GeometryResource {
Q_OBJECT
public:
GeometryDefinitionResource(const QUrl& url, const QVariantHash& mapping, const QUrl& textureBaseUrl) :
GeometryResource(url), _mapping(mapping), _textureBaseUrl(textureBaseUrl.isValid() ? textureBaseUrl : url) {}
if (delayLoad) {
_state = DelayState;
} else {
attemptRequestInternal();
}
virtual void downloadFinished(const QByteArray& data) override;
protected:
Q_INVOKABLE void setGeometryDefinition(void* fbxGeometry);
private:
QVariantHash _mapping;
QUrl _textureBaseUrl;
};
void GeometryDefinitionResource::downloadFinished(const QByteArray& data) {
QThreadPool::globalInstance()->start(new GeometryReader(_self, _url, _mapping, data));
}
NetworkGeometry::~NetworkGeometry() {
if (_resource) {
_resource->deleteLater();
}
}
void GeometryDefinitionResource::setGeometryDefinition(void* fbxGeometry) {
// Assume ownership of the geometry pointer
_geometry.reset(static_cast<FBXGeometry*>(fbxGeometry));
void NetworkGeometry::attemptRequest() {
if (_state == DelayState) {
attemptRequestInternal();
}
}
void NetworkGeometry::attemptRequestInternal() {
if (_url.path().toLower().endsWith(".fst")) {
_mappingUrl = _url;
requestMapping(_url);
} else {
_modelUrl = _url;
requestModel(_url);
}
}
bool NetworkGeometry::isLoaded() const {
return _state == SuccessState;
}
bool NetworkGeometry::isLoadedWithTextures() const {
if (!isLoaded()) {
return false;
// Copy materials
QHash<QString, size_t> materialIDAtlas;
for (const FBXMaterial& material : _geometry->materials) {
materialIDAtlas[material.materialID] = _materials.size();
_materials.push_back(std::make_shared<NetworkMaterial>(material, _textureBaseUrl));
}
if (!_isLoadedWithTextures) {
_hasTransparentTextures = false;
for (auto&& material : _materials) {
if ((material->albedoTexture && !material->albedoTexture->isLoaded()) ||
(material->normalTexture && !material->normalTexture->isLoaded()) ||
(material->roughnessTexture && !material->roughnessTexture->isLoaded()) ||
(material->metallicTexture && !material->metallicTexture->isLoaded()) ||
(material->occlusionTexture && !material->occlusionTexture->isLoaded()) ||
(material->emissiveTexture && !material->emissiveTexture->isLoaded()) ||
(material->lightmapTexture && !material->lightmapTexture->isLoaded())) {
return false;
}
if (material->albedoTexture && material->albedoTexture->getGPUTexture()) {
// Reassign the texture to make sure that its albedo alpha channel material key is detected correctly
material->_material->setTextureMap(model::MaterialKey::ALBEDO_MAP, material->_material->getTextureMap(model::MaterialKey::ALBEDO_MAP));
const auto& usage = material->albedoTexture->getGPUTexture()->getUsage();
bool isTransparentTexture = usage.isAlpha() && !usage.isAlphaMask();
_hasTransparentTextures |= isTransparentTexture;
}
}
_isLoadedWithTextures = true;
}
return true;
}
void NetworkGeometry::setTextureWithNameToURL(const QString& name, const QUrl& url) {
if (_meshes.size() > 0) {
auto textureCache = DependencyManager::get<TextureCache>();
for (auto&& material : _materials) {
auto networkMaterial = material->_material;
auto oldTextureMaps = networkMaterial->getTextureMaps();
if (material->albedoTextureName == name) {
material->albedoTexture = textureCache->getTexture(url, DEFAULT_TEXTURE);
auto albedoMap = model::TextureMapPointer(new model::TextureMap());
albedoMap->setTextureSource(material->albedoTexture->_textureSource);
albedoMap->setTextureTransform(oldTextureMaps[model::MaterialKey::ALBEDO_MAP]->getTextureTransform());
// when reassigning the albedo texture we also check for the alpha channel used as opacity
albedoMap->setUseAlphaChannel(true);
networkMaterial->setTextureMap(model::MaterialKey::ALBEDO_MAP, albedoMap);
} else if (material->normalTextureName == name) {
material->normalTexture = textureCache->getTexture(url);
auto normalMap = model::TextureMapPointer(new model::TextureMap());
normalMap->setTextureSource(material->normalTexture->_textureSource);
networkMaterial->setTextureMap(model::MaterialKey::NORMAL_MAP, normalMap);
} else if (material->roughnessTextureName == name) {
// FIXME: If passing a gloss map instead of a roughmap how to say that ? looking for gloss in the name ?
material->roughnessTexture = textureCache->getTexture(url, ROUGHNESS_TEXTURE);
auto roughnessMap = model::TextureMapPointer(new model::TextureMap());
roughnessMap->setTextureSource(material->roughnessTexture->_textureSource);
networkMaterial->setTextureMap(model::MaterialKey::ROUGHNESS_MAP, roughnessMap);
} else if (material->metallicTextureName == name) {
// FIXME: If passing a specular map instead of a metallic how to say that ? looking for wtf in the name ?
material->metallicTexture = textureCache->getTexture(url, METALLIC_TEXTURE);
auto glossMap = model::TextureMapPointer(new model::TextureMap());
glossMap->setTextureSource(material->metallicTexture->_textureSource);
networkMaterial->setTextureMap(model::MaterialKey::METALLIC_MAP, glossMap);
} else if (material->emissiveTextureName == name) {
material->emissiveTexture = textureCache->getTexture(url, EMISSIVE_TEXTURE);
auto emissiveMap = model::TextureMapPointer(new model::TextureMap());
emissiveMap->setTextureSource(material->emissiveTexture->_textureSource);
networkMaterial->setTextureMap(model::MaterialKey::EMISSIVE_MAP, emissiveMap);
} else if (material->lightmapTextureName == name) {
material->lightmapTexture = textureCache->getTexture(url, LIGHTMAP_TEXTURE);
auto lightmapMap = model::TextureMapPointer(new model::TextureMap());
lightmapMap->setTextureSource(material->lightmapTexture->_textureSource);
lightmapMap->setTextureTransform(
oldTextureMaps[model::MaterialKey::LIGHTMAP_MAP]->getTextureTransform());
glm::vec2 oldOffsetScale =
oldTextureMaps[model::MaterialKey::LIGHTMAP_MAP]->getLightmapOffsetScale();
lightmapMap->setLightmapOffsetScale(oldOffsetScale.x, oldOffsetScale.y);
networkMaterial->setTextureMap(model::MaterialKey::LIGHTMAP_MAP, lightmapMap);
}
}
} else {
qCWarning(modelnetworking) << "Ignoring setTextureWithNameToURL() geometry not ready." << name << url;
}
_isLoadedWithTextures = false;
}
QStringList NetworkGeometry::getTextureNames() const {
QStringList result;
for (auto&& material : _materials) {
if (!material->emissiveTextureName.isEmpty() && material->emissiveTexture) {
QString textureURL = material->emissiveTexture->getURL().toString();
result << material->emissiveTextureName + ":\"" + textureURL + "\"";
}
if (!material->albedoTextureName.isEmpty() && material->albedoTexture) {
QString textureURL = material->albedoTexture->getURL().toString();
result << material->albedoTextureName + ":\"" + textureURL + "\"";
}
if (!material->normalTextureName.isEmpty() && material->normalTexture) {
QString textureURL = material->normalTexture->getURL().toString();
result << material->normalTextureName + ":\"" + textureURL + "\"";
}
if (!material->roughnessTextureName.isEmpty() && material->roughnessTexture) {
QString textureURL = material->roughnessTexture->getURL().toString();
result << material->roughnessTextureName + ":\"" + textureURL + "\"";
}
if (!material->metallicTextureName.isEmpty() && material->metallicTexture) {
QString textureURL = material->metallicTexture->getURL().toString();
result << material->metallicTextureName + ":\"" + textureURL + "\"";
}
if (!material->occlusionTextureName.isEmpty() && material->occlusionTexture) {
QString textureURL = material->occlusionTexture->getURL().toString();
result << material->occlusionTextureName + ":\"" + textureURL + "\"";
}
if (!material->lightmapTextureName.isEmpty() && material->lightmapTexture) {
QString textureURL = material->lightmapTexture->getURL().toString();
result << material->lightmapTextureName + ":\"" + textureURL + "\"";
}
}
return result;
}
void NetworkGeometry::requestMapping(const QUrl& url) {
_state = RequestMappingState;
if (_resource) {
_resource->deleteLater();
}
_resource = new Resource(url, false);
connect(_resource, &Resource::loaded, this, &NetworkGeometry::mappingRequestDone);
connect(_resource, &Resource::failed, this, &NetworkGeometry::mappingRequestError);
}
void NetworkGeometry::requestModel(const QUrl& url) {
_state = RequestModelState;
if (_resource) {
_resource->deleteLater();
}
_modelUrl = url;
_resource = new Resource(url, false);
connect(_resource, &Resource::loaded, this, &NetworkGeometry::modelRequestDone);
connect(_resource, &Resource::failed, this, &NetworkGeometry::modelRequestError);
}
void NetworkGeometry::mappingRequestDone(const QByteArray data) {
assert(_state == RequestMappingState);
// parse the mapping file
_mapping = FSTReader::readMapping(data);
QUrl replyUrl = _mappingUrl;
QString modelUrlStr = _mapping.value("filename").toString();
if (modelUrlStr.isNull()) {
qCDebug(modelnetworking) << "Mapping file " << _url << "has no \"filename\" entry";
emit onFailure(*this, MissingFilenameInMapping);
} else {
// read _textureBase from mapping file, if present
QString texdir = _mapping.value("texdir").toString();
if (!texdir.isNull()) {
if (!texdir.endsWith('/')) {
texdir += '/';
}
_textureBaseUrl = replyUrl.resolved(texdir);
}
_modelUrl = replyUrl.resolved(modelUrlStr);
requestModel(_modelUrl);
}
}
void NetworkGeometry::mappingRequestError(QNetworkReply::NetworkError error) {
assert(_state == RequestMappingState);
_state = ErrorState;
emit onFailure(*this, MappingRequestError);
}
void NetworkGeometry::modelRequestDone(const QByteArray data) {
assert(_state == RequestModelState);
_state = ParsingModelState;
// asynchronously parse the model file.
GeometryReader* geometryReader = new GeometryReader(_modelUrl, data, _mapping);
connect(geometryReader, SIGNAL(onSuccess(FBXGeometry*)), SLOT(modelParseSuccess(FBXGeometry*)));
connect(geometryReader, SIGNAL(onError(int, QString)), SLOT(modelParseError(int, QString)));
QThreadPool::globalInstance()->start(geometryReader);
}
void NetworkGeometry::modelRequestError(QNetworkReply::NetworkError error) {
assert(_state == RequestModelState);
_state = ErrorState;
emit onFailure(*this, ModelRequestError);
}
static NetworkMesh* buildNetworkMesh(const FBXMesh& mesh, const QUrl& textureBaseUrl) {
NetworkMesh* networkMesh = new NetworkMesh();
networkMesh->_mesh = mesh._mesh;
return networkMesh;
}
static model::TextureMapPointer setupNetworkTextureMap(NetworkGeometry* geometry, const QUrl& textureBaseUrl,
const FBXTexture& texture, TextureType type,
NetworkTexturePointer& networkTexture, QString& networkTextureName) {
auto textureCache = DependencyManager::get<TextureCache>();
// If content is inline, cache it under the fbx file, not its base url
const auto baseUrl = texture.content.isEmpty() ? textureBaseUrl : QUrl(textureBaseUrl.url() + "/");
const auto filename = baseUrl.resolved(QUrl(texture.filename));
networkTexture = textureCache->getTexture(filename, type, texture.content);
QObject::connect(networkTexture.data(), &NetworkTexture::networkTextureCreated, geometry, &NetworkGeometry::textureLoaded);
networkTextureName = texture.name;
auto map = std::make_shared<model::TextureMap>();
map->setTextureSource(networkTexture->_textureSource);
return map;
}
static NetworkMaterial* buildNetworkMaterial(NetworkGeometry* geometry, const FBXMaterial& material, const QUrl& textureBaseUrl) {
NetworkMaterial* networkMaterial = new NetworkMaterial();
networkMaterial->_material = material._material;
if (!material.albedoTexture.filename.isEmpty()) {
auto albedoMap = setupNetworkTextureMap(geometry, textureBaseUrl, material.albedoTexture, DEFAULT_TEXTURE,
networkMaterial->albedoTexture, networkMaterial->albedoTextureName);
albedoMap->setTextureTransform(material.albedoTexture.transform);
if (!material.opacityTexture.filename.isEmpty()) {
if (material.albedoTexture.filename == material.opacityTexture.filename) {
// Best case scenario, just indicating that the albedo map contains transparency
albedoMap->setUseAlphaChannel(true);
} else {
// Opacity map is different from the albedo map, not supported
}
}
material._material->setTextureMap(model::MaterialKey::ALBEDO_MAP, albedoMap);
}
if (!material.normalTexture.filename.isEmpty()) {
auto normalMap = setupNetworkTextureMap(geometry, textureBaseUrl, material.normalTexture,
(material.normalTexture.isBumpmap ? BUMP_TEXTURE : NORMAL_TEXTURE),
networkMaterial->normalTexture, networkMaterial->normalTextureName);
networkMaterial->_material->setTextureMap(model::MaterialKey::NORMAL_MAP, normalMap);
}
// Roughness first or gloss maybe
if (!material.roughnessTexture.filename.isEmpty()) {
auto roughnessMap = setupNetworkTextureMap(geometry, textureBaseUrl, material.roughnessTexture, ROUGHNESS_TEXTURE,
networkMaterial->roughnessTexture, networkMaterial->roughnessTextureName);
material._material->setTextureMap(model::MaterialKey::ROUGHNESS_MAP, roughnessMap);
} else if (!material.glossTexture.filename.isEmpty()) {
auto roughnessMap = setupNetworkTextureMap(geometry, textureBaseUrl, material.glossTexture, GLOSS_TEXTURE,
networkMaterial->roughnessTexture, networkMaterial->roughnessTextureName);
material._material->setTextureMap(model::MaterialKey::ROUGHNESS_MAP, roughnessMap);
}
// Metallic first or specular maybe
if (!material.metallicTexture.filename.isEmpty()) {
auto metallicMap = setupNetworkTextureMap(geometry, textureBaseUrl, material.metallicTexture, METALLIC_TEXTURE,
networkMaterial->metallicTexture, networkMaterial->metallicTextureName);
material._material->setTextureMap(model::MaterialKey::METALLIC_MAP, metallicMap);
} else if (!material.specularTexture.filename.isEmpty()) {
auto metallicMap = setupNetworkTextureMap(geometry, textureBaseUrl, material.specularTexture, SPECULAR_TEXTURE,
networkMaterial->metallicTexture, networkMaterial->metallicTextureName);
material._material->setTextureMap(model::MaterialKey::METALLIC_MAP, metallicMap);
}
if (!material.occlusionTexture.filename.isEmpty()) {
auto occlusionMap = setupNetworkTextureMap(geometry, textureBaseUrl, material.occlusionTexture, OCCLUSION_TEXTURE,
networkMaterial->occlusionTexture, networkMaterial->occlusionTextureName);
material._material->setTextureMap(model::MaterialKey::OCCLUSION_MAP, occlusionMap);
}
if (!material.emissiveTexture.filename.isEmpty()) {
auto emissiveMap = setupNetworkTextureMap(geometry, textureBaseUrl, material.emissiveTexture, EMISSIVE_TEXTURE,
networkMaterial->emissiveTexture, networkMaterial->emissiveTextureName);
material._material->setTextureMap(model::MaterialKey::EMISSIVE_MAP, emissiveMap);
}
if (!material.lightmapTexture.filename.isEmpty()) {
auto lightmapMap = setupNetworkTextureMap(geometry, textureBaseUrl, material.lightmapTexture, LIGHTMAP_TEXTURE,
networkMaterial->lightmapTexture, networkMaterial->lightmapTextureName);
lightmapMap->setTextureTransform(material.lightmapTexture.transform);
lightmapMap->setLightmapOffsetScale(material.lightmapParams.x, material.lightmapParams.y);
material._material->setTextureMap(model::MaterialKey::LIGHTMAP_MAP, lightmapMap);
}
return networkMaterial;
}
void NetworkGeometry::modelParseSuccess(FBXGeometry* geometry) {
// assume ownership of geometry pointer
_geometry.reset(geometry);
foreach(const FBXMesh& mesh, _geometry->meshes) {
_meshes.emplace_back(buildNetworkMesh(mesh, _textureBaseUrl));
}
QHash<QString, size_t> fbxMatIDToMatID;
foreach(const FBXMaterial& material, _geometry->materials) {
fbxMatIDToMatID[material.materialID] = _materials.size();
_materials.emplace_back(buildNetworkMaterial(this, material, _textureBaseUrl));
}
std::shared_ptr<NetworkMeshes> meshes = std::make_shared<NetworkMeshes>();
std::shared_ptr<NetworkShapes> shapes = std::make_shared<NetworkShapes>();
int meshID = 0;
foreach(const FBXMesh& mesh, _geometry->meshes) {
for (const FBXMesh& mesh : _geometry->meshes) {
// Copy mesh pointers
meshes->emplace_back(mesh._mesh);
int partID = 0;
foreach (const FBXMeshPart& part, mesh.parts) {
NetworkShape* networkShape = new NetworkShape();
networkShape->_meshID = meshID;
networkShape->_partID = partID;
networkShape->_materialID = (int)fbxMatIDToMatID[part.materialID];
_shapes.emplace_back(networkShape);
for (const FBXMeshPart& part : mesh.parts) {
// Construct local shapes
shapes->push_back(std::make_shared<NetworkShape>(meshID, partID, (int)materialIDAtlas[part.materialID]));
partID++;
}
meshID++;
}
_meshes = meshes;
_shapes = shapes;
_state = SuccessState;
emit onSuccess(*this, *_geometry.get());
delete _resource;
_resource = nullptr;
finishedLoading(true);
}
void NetworkGeometry::modelParseError(int error, QString str) {
_state = ErrorState;
emit onFailure(*this, (NetworkGeometry::Error)error);
delete _resource;
_resource = nullptr;
ModelCache::ModelCache() {
const qint64 GEOMETRY_DEFAULT_UNUSED_MAX_SIZE = DEFAULT_UNUSED_MAX_SIZE;
setUnusedResourceCacheSize(GEOMETRY_DEFAULT_UNUSED_MAX_SIZE);
}
const NetworkMaterial* NetworkGeometry::getShapeMaterial(int shapeID) {
if ((shapeID >= 0) && (shapeID < (int)_shapes.size())) {
int materialID = _shapes[shapeID]->_materialID;
if ((materialID >= 0) && ((unsigned int)materialID < _materials.size())) {
return _materials[materialID].get();
} else {
return 0;
}
QSharedPointer<Resource> ModelCache::createResource(const QUrl& url, const QSharedPointer<Resource>& fallback,
bool delayLoad, const void* extra) {
const GeometryExtra* geometryExtra = static_cast<const GeometryExtra*>(extra);
Resource* resource = nullptr;
if (url.path().toLower().endsWith(".fst")) {
resource = new GeometryMappingResource(url);
} else {
return 0;
resource = new GeometryDefinitionResource(url, geometryExtra->mapping, geometryExtra->textureBaseUrl);
}
return QSharedPointer<Resource>(resource, &Resource::allReferencesCleared);
}
std::shared_ptr<NetworkGeometry> ModelCache::getGeometry(const QUrl& url, const QVariantHash& mapping, const QUrl& textureBaseUrl) {
GeometryExtra geometryExtra = { mapping, textureBaseUrl };
GeometryResource::Pointer resource = getResource(url, QUrl(), true, &geometryExtra).staticCast<GeometryResource>();
return std::make_shared<NetworkGeometry>(resource);
}
const QVariantMap Geometry::getTextures() const {
QVariantMap textures;
for (const auto& material : _materials) {
for (const auto& texture : material->_textures) {
if (texture.texture) {
textures[texture.name] = texture.texture->getURL();
}
}
}
return textures;
}
// FIXME: The materials should only be copied when modified, but the Model currently caches the original
Geometry::Geometry(const Geometry& geometry) {
_geometry = geometry._geometry;
_meshes = geometry._meshes;
_shapes = geometry._shapes;
_materials.reserve(geometry._materials.size());
for (const auto& material : geometry._materials) {
_materials.push_back(std::make_shared<NetworkMaterial>(*material));
}
}
void NetworkGeometry::textureLoaded(const QWeakPointer<NetworkTexture>& networkTexture) {
numTextureLoaded++;
void Geometry::setTextures(const QVariantMap& textureMap) {
if (_meshes->size() > 0) {
for (auto& material : _materials) {
// Check if any material textures actually changed
if (std::any_of(material->_textures.cbegin(), material->_textures.cend(),
[&textureMap](const NetworkMaterial::Textures::value_type& it) { return it.texture && textureMap.contains(it.name); })) {
// FIXME: The Model currently caches the materials (waste of space!)
// so they must be copied in the Geometry copy-ctor
// if (material->isOriginal()) {
// // Copy the material to avoid mutating the cached version
// material = std::make_shared<NetworkMaterial>(*material);
//}
material->setTextures(textureMap);
_areTexturesLoaded = false;
}
}
} else {
qCWarning(modelnetworking) << "Ignoring setTextures(); geometry not ready";
}
}
bool Geometry::areTexturesLoaded() const {
if (!_areTexturesLoaded) {
_hasTransparentTextures = false;
for (auto& material : _materials) {
// Check if material textures are loaded
if (std::any_of(material->_textures.cbegin(), material->_textures.cend(),
[](const NetworkMaterial::Textures::value_type& it) { return it.texture && !it.texture->isLoaded(); })) {
return false;
}
// If material textures are loaded, check the material translucency
const auto albedoTexture = material->_textures[NetworkMaterial::MapChannel::ALBEDO_MAP];
if (albedoTexture.texture && albedoTexture.texture->getGPUTexture()) {
material->resetOpacityMap();
_hasTransparentTextures |= material->getKey().isTranslucent();
}
}
_areTexturesLoaded = true;
}
return true;
}
const std::shared_ptr<const NetworkMaterial> Geometry::getShapeMaterial(int shapeID) const {
if ((shapeID >= 0) && (shapeID < (int)_shapes->size())) {
int materialID = _shapes->at(shapeID)->materialID;
if ((materialID >= 0) && (materialID < (int)_materials.size())) {
return _materials[materialID];
}
}
return nullptr;
}
NetworkGeometry::NetworkGeometry(const GeometryResource::Pointer& networkGeometry) : _resource(networkGeometry) {
connect(_resource.data(), &Resource::finished, this, &NetworkGeometry::resourceFinished);
connect(_resource.data(), &Resource::onRefresh, this, &NetworkGeometry::resourceRefreshed);
if (_resource->isLoaded()) {
resourceFinished(!_resource->getURL().isEmpty());
}
}
void NetworkGeometry::resourceFinished(bool success) {
// FIXME: Model is not set up to handle a refresh
if (_instance) {
return;
}
if (success) {
_instance = std::make_shared<Geometry>(*_resource);
}
emit finished(success);
}
void NetworkGeometry::resourceRefreshed() {
// FIXME: Model is not set up to handle a refresh
// _instance.reset();
}
const QString NetworkMaterial::NO_TEXTURE = QString();
const QString& NetworkMaterial::getTextureName(MapChannel channel) {
if (_textures[channel].texture) {
return _textures[channel].name;
}
return NO_TEXTURE;
}
QUrl NetworkMaterial::getTextureUrl(const QUrl& url, const FBXTexture& texture) {
// If content is inline, cache it under the fbx file, not its url
const auto baseUrl = texture.content.isEmpty() ? url : QUrl(url.url() + "/");
return baseUrl.resolved(QUrl(texture.filename));
}
model::TextureMapPointer NetworkMaterial::fetchTextureMap(const QUrl& baseUrl, const FBXTexture& fbxTexture,
TextureType type, MapChannel channel) {
const auto url = getTextureUrl(baseUrl, fbxTexture);
const auto texture = DependencyManager::get<TextureCache>()->getTexture(url, type, fbxTexture.content);
_textures[channel] = Texture { fbxTexture.name, texture };
auto map = std::make_shared<model::TextureMap>();
map->setTextureSource(texture->_textureSource);
return map;
}
model::TextureMapPointer NetworkMaterial::fetchTextureMap(const QUrl& url, TextureType type, MapChannel channel) {
const auto texture = DependencyManager::get<TextureCache>()->getTexture(url, type);
_textures[channel].texture = texture;
auto map = std::make_shared<model::TextureMap>();
map->setTextureSource(texture->_textureSource);
return map;
}
NetworkMaterial::NetworkMaterial(const FBXMaterial& material, const QUrl& textureBaseUrl) :
model::Material(*material._material)
{
_textures = Textures(MapChannel::NUM_MAP_CHANNELS);
if (!material.albedoTexture.filename.isEmpty()) {
auto map = fetchTextureMap(textureBaseUrl, material.albedoTexture, DEFAULT_TEXTURE, MapChannel::ALBEDO_MAP);
_albedoTransform = material.albedoTexture.transform;
map->setTextureTransform(_albedoTransform);
if (!material.opacityTexture.filename.isEmpty()) {
if (material.albedoTexture.filename == material.opacityTexture.filename) {
// Best case scenario, just indicating that the albedo map contains transparency
// TODO: Different albedo/opacity maps are not currently supported
map->setUseAlphaChannel(true);
}
}
setTextureMap(MapChannel::ALBEDO_MAP, map);
}
if (!material.normalTexture.filename.isEmpty()) {
auto type = (material.normalTexture.isBumpmap ? BUMP_TEXTURE : NORMAL_TEXTURE);
auto map = fetchTextureMap(textureBaseUrl, material.normalTexture, type, MapChannel::NORMAL_MAP);
setTextureMap(MapChannel::NORMAL_MAP, map);
}
if (!material.roughnessTexture.filename.isEmpty()) {
auto map = fetchTextureMap(textureBaseUrl, material.roughnessTexture, ROUGHNESS_TEXTURE, MapChannel::ROUGHNESS_MAP);
setTextureMap(MapChannel::ROUGHNESS_MAP, map);
} else if (!material.glossTexture.filename.isEmpty()) {
auto map = fetchTextureMap(textureBaseUrl, material.glossTexture, GLOSS_TEXTURE, MapChannel::ROUGHNESS_MAP);
setTextureMap(MapChannel::ROUGHNESS_MAP, map);
}
if (!material.metallicTexture.filename.isEmpty()) {
auto map = fetchTextureMap(textureBaseUrl, material.metallicTexture, METALLIC_TEXTURE, MapChannel::METALLIC_MAP);
setTextureMap(MapChannel::METALLIC_MAP, map);
} else if (!material.specularTexture.filename.isEmpty()) {
auto map = fetchTextureMap(textureBaseUrl, material.specularTexture, SPECULAR_TEXTURE, MapChannel::METALLIC_MAP);
setTextureMap(MapChannel::METALLIC_MAP, map);
}
if (!material.occlusionTexture.filename.isEmpty()) {
auto map = fetchTextureMap(textureBaseUrl, material.occlusionTexture, OCCLUSION_TEXTURE, MapChannel::OCCLUSION_MAP);
setTextureMap(MapChannel::OCCLUSION_MAP, map);
}
if (!material.emissiveTexture.filename.isEmpty()) {
auto map = fetchTextureMap(textureBaseUrl, material.emissiveTexture, EMISSIVE_TEXTURE, MapChannel::EMISSIVE_MAP);
setTextureMap(MapChannel::EMISSIVE_MAP, map);
}
if (!material.lightmapTexture.filename.isEmpty()) {
auto map = fetchTextureMap(textureBaseUrl, material.lightmapTexture, LIGHTMAP_TEXTURE, MapChannel::LIGHTMAP_MAP);
_lightmapTransform = material.lightmapTexture.transform;
_lightmapParams = material.lightmapParams;
map->setTextureTransform(_lightmapTransform);
map->setLightmapOffsetScale(_lightmapParams.x, _lightmapParams.y);
setTextureMap(MapChannel::LIGHTMAP_MAP, map);
}
}
void NetworkMaterial::setTextures(const QVariantMap& textureMap) {
_isOriginal = false;
const auto& albedoName = getTextureName(MapChannel::ALBEDO_MAP);
const auto& normalName = getTextureName(MapChannel::NORMAL_MAP);
const auto& roughnessName = getTextureName(MapChannel::ROUGHNESS_MAP);
const auto& metallicName = getTextureName(MapChannel::METALLIC_MAP);
const auto& occlusionName = getTextureName(MapChannel::OCCLUSION_MAP);
const auto& emissiveName = getTextureName(MapChannel::EMISSIVE_MAP);
const auto& lightmapName = getTextureName(MapChannel::LIGHTMAP_MAP);
if (!albedoName.isEmpty()) {
auto url = textureMap.contains(albedoName) ? textureMap[albedoName].toUrl() : QUrl();
auto map = fetchTextureMap(url, DEFAULT_TEXTURE, MapChannel::ALBEDO_MAP);
map->setTextureTransform(_albedoTransform);
// when reassigning the albedo texture, re-check whether its alpha channel should be used as opacity
map->setUseAlphaChannel(true);
setTextureMap(MapChannel::ALBEDO_MAP, map);
}
if (!normalName.isEmpty()) {
auto url = textureMap.contains(normalName) ? textureMap[normalName].toUrl() : QUrl();
auto map = fetchTextureMap(url, DEFAULT_TEXTURE, MapChannel::NORMAL_MAP);
setTextureMap(MapChannel::NORMAL_MAP, map);
}
if (!roughnessName.isEmpty()) {
auto url = textureMap.contains(roughnessName) ? textureMap[roughnessName].toUrl() : QUrl();
// FIXME: If a gloss map is passed instead of a roughness map, how do we know?
auto map = fetchTextureMap(url, ROUGHNESS_TEXTURE, MapChannel::ROUGHNESS_MAP);
setTextureMap(MapChannel::ROUGHNESS_MAP, map);
}
if (!metallicName.isEmpty()) {
auto url = textureMap.contains(metallicName) ? textureMap[metallicName].toUrl() : QUrl();
// FIXME: If a specular map is passed instead of a metallic map, how do we know?
auto map = fetchTextureMap(url, METALLIC_TEXTURE, MapChannel::METALLIC_MAP);
setTextureMap(MapChannel::METALLIC_MAP, map);
}
if (!occlusionName.isEmpty()) {
auto url = textureMap.contains(occlusionName) ? textureMap[occlusionName].toUrl() : QUrl();
auto map = fetchTextureMap(url, OCCLUSION_TEXTURE, MapChannel::OCCLUSION_MAP);
setTextureMap(MapChannel::OCCLUSION_MAP, map);
}
if (!emissiveName.isEmpty()) {
auto url = textureMap.contains(emissiveName) ? textureMap[emissiveName].toUrl() : QUrl();
auto map = fetchTextureMap(url, EMISSIVE_TEXTURE, MapChannel::EMISSIVE_MAP);
setTextureMap(MapChannel::EMISSIVE_MAP, map);
}
if (!lightmapName.isEmpty()) {
auto url = textureMap.contains(lightmapName) ? textureMap[lightmapName].toUrl() : QUrl();
auto map = fetchTextureMap(url, LIGHTMAP_TEXTURE, MapChannel::LIGHTMAP_MAP);
map->setTextureTransform(_lightmapTransform);
map->setLightmapOffsetScale(_lightmapParams.x, _lightmapParams.y);
setTextureMap(MapChannel::LIGHTMAP_MAP, map);
}
}
#include "ModelCache.moc"

View file

@ -1,9 +1,9 @@
//
// ModelCache.h
// libraries/model-networking/src/model-networking
// libraries/model-networking
//
// Created by Sam Gateau on 9/21/15.
// Copyright 2013 High Fidelity, Inc.
// Created by Zach Pomerantz on 3/15/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -12,200 +12,176 @@
#ifndef hifi_ModelCache_h
#define hifi_ModelCache_h
#include <QMap>
#include <QRunnable>
#include <DependencyManager.h>
#include <ResourceCache.h>
#include "FBXReader.h"
#include "OBJReader.h"
#include <gpu/Batch.h>
#include <gpu/Stream.h>
#include <model/Material.h>
#include <model/Asset.h>
class NetworkGeometry;
class NetworkMesh;
#include "FBXReader.h"
#include "TextureCache.h"
// Alias instead of derive to avoid copying
using NetworkMesh = model::Mesh;
class NetworkTexture;
class NetworkMaterial;
class NetworkShape;
class NetworkGeometry;
/// Stores cached geometry.
class GeometryMappingResource;
/// Stores cached model geometries.
class ModelCache : public ResourceCache, public Dependency {
Q_OBJECT
SINGLETON_DEPENDENCY
public:
virtual QSharedPointer<Resource> createResource(const QUrl& url, const QSharedPointer<Resource>& fallback,
bool delayLoad, const void* extra);
/// Loads a model geometry from the specified URL.
std::shared_ptr<NetworkGeometry> getGeometry(const QUrl& url,
const QVariantHash& mapping = QVariantHash(), const QUrl& textureBaseUrl = QUrl());
/// Loads geometry from the specified URL.
/// \param fallback a fallback URL to load if the desired one is unavailable
/// \param delayLoad if true, don't load the geometry immediately; wait until load is first requested
QSharedPointer<NetworkGeometry> getGeometry(const QUrl& url, const QUrl& fallback = QUrl(), bool delayLoad = false);
protected:
friend class GeometryMappingResource;
virtual QSharedPointer<Resource> createResource(const QUrl& url,
const QSharedPointer<Resource>& fallback, bool delayLoad, const void* extra);
private:
ModelCache();
virtual ~ModelCache();
virtual ~ModelCache() = default;
};
QHash<QUrl, QWeakPointer<NetworkGeometry> > _networkGeometry;
class Geometry {
public:
using Pointer = std::shared_ptr<Geometry>;
Geometry() = default;
Geometry(const Geometry& geometry);
// Immutable over lifetime
using NetworkMeshes = std::vector<std::shared_ptr<const NetworkMesh>>;
using NetworkShapes = std::vector<std::shared_ptr<const NetworkShape>>;
// Mutable, but must retain structure of vector
using NetworkMaterials = std::vector<std::shared_ptr<NetworkMaterial>>;
const FBXGeometry& getGeometry() const { return *_geometry; }
const NetworkMeshes& getMeshes() const { return *_meshes; }
const std::shared_ptr<const NetworkMaterial> getShapeMaterial(int shapeID) const;
const QVariantMap getTextures() const;
void setTextures(const QVariantMap& textureMap);
virtual bool areTexturesLoaded() const;
// Returns true if any albedo texture has a non-masking alpha channel.
// This can only be known after areTexturesLoaded().
bool hasTransparentTextures() const { return _hasTransparentTextures; }
protected:
friend class GeometryMappingResource;
// Shared across all geometries, constant throughout lifetime
std::shared_ptr<const FBXGeometry> _geometry;
std::shared_ptr<const NetworkMeshes> _meshes;
std::shared_ptr<const NetworkShapes> _shapes;
// Copied to each geometry, mutable throughout lifetime via setTextures
NetworkMaterials _materials;
private:
mutable bool _areTexturesLoaded { false };
mutable bool _hasTransparentTextures { false };
};
/// A geometry loaded from the network.
class GeometryResource : public Resource, public Geometry {
public:
using Pointer = QSharedPointer<GeometryResource>;
GeometryResource(const QUrl& url) : Resource(url) {}
virtual bool areTexturesLoaded() const { return isLoaded() && Geometry::areTexturesLoaded(); }
protected:
friend class GeometryMappingResource;
virtual bool isCacheable() const override { return _loaded && _isCacheable; }
bool _isCacheable { true };
};
class NetworkGeometry : public QObject {
Q_OBJECT
public:
// mapping is only used if url is a .fbx or .obj file; it is essentially the content of an fst file.
// if delayLoad is true, the url will not be immediately downloaded.
// use the attemptRequest method to initiate the download.
NetworkGeometry(const QUrl& url, bool delayLoad, const QVariantHash& mapping, const QUrl& textureBaseUrl = QUrl());
~NetworkGeometry();
using Pointer = std::shared_ptr<NetworkGeometry>;
const QUrl& getURL() const { return _url; }
NetworkGeometry() = delete;
NetworkGeometry(const GeometryResource::Pointer& networkGeometry);
void attemptRequest();
const QUrl& getURL() { return _resource->getURL(); }
// true when the geometry is loaded (but maybe not its associated textures)
bool isLoaded() const;
// true when the requested geometry and its textures are loaded.
bool isLoadedWithTextures() const;
// true if the albedo texture has a non-masking alpha channel.
// This can only be known after isLoadedWithTextures().
bool hasTransparentTextures() const { return _hasTransparentTextures; }
// WARNING: only valid when isLoaded returns true.
const FBXGeometry& getFBXGeometry() const { return *_geometry; }
const std::vector<std::unique_ptr<NetworkMesh>>& getMeshes() const { return _meshes; }
// const model::AssetPointer getAsset() const { return _asset; }
// model::MeshPointer getShapeMesh(int shapeID);
// int getShapePart(int shapeID);
// This would be the final version
// model::MaterialPointer getShapeMaterial(int shapeID);
const NetworkMaterial* getShapeMaterial(int shapeID);
void setTextureWithNameToURL(const QString& name, const QUrl& url);
QStringList getTextureNames() const;
enum Error {
MissingFilenameInMapping = 0,
MappingRequestError,
ModelRequestError,
ModelParseError
};
/// Returns the geometry, if it is loaded (must be checked!)
const Geometry::Pointer& getGeometry() { return _instance; }
signals:
// Fired when everything has downloaded and parsed successfully.
void onSuccess(NetworkGeometry& networkGeometry, FBXGeometry& fbxGeometry);
/// Emitted when the NetworkGeometry loads (or fails to)
void finished(bool success);
// Fired when something went wrong.
void onFailure(NetworkGeometry& networkGeometry, Error error);
private slots:
void resourceFinished(bool success);
void resourceRefreshed();
public slots:
void textureLoaded(const QWeakPointer<NetworkTexture>& networkTexture);
private:
GeometryResource::Pointer _resource;
Geometry::Pointer _instance { nullptr };
};
protected slots:
void mappingRequestDone(const QByteArray data);
void mappingRequestError(QNetworkReply::NetworkError error);
void modelRequestDone(const QByteArray data);
void modelRequestError(QNetworkReply::NetworkError error);
void modelParseSuccess(FBXGeometry* geometry);
void modelParseError(int error, QString str);
class NetworkMaterial : public model::Material {
public:
using MapChannel = model::Material::MapChannel;
NetworkMaterial(const FBXMaterial& material, const QUrl& textureBaseUrl);
protected:
void attemptRequestInternal();
void requestMapping(const QUrl& url);
void requestModel(const QUrl& url);
friend class Geometry;
enum State { DelayState,
RequestMappingState,
RequestModelState,
ParsingModelState,
SuccessState,
ErrorState };
State _state;
class Texture {
public:
QString name;
QSharedPointer<NetworkTexture> texture;
};
using Textures = std::vector<Texture>;
QUrl _url;
QUrl _mappingUrl;
QUrl _modelUrl;
QVariantHash _mapping;
QUrl _textureBaseUrl;
int numTextureLoaded = 0;
Textures _textures;
Resource* _resource = nullptr;
std::unique_ptr<FBXGeometry> _geometry; // This should go away eventually once we can put everything we need in the model::AssetPointer
std::vector<std::unique_ptr<NetworkMesh>> _meshes;
std::vector<std::unique_ptr<NetworkMaterial>> _materials;
std::vector<std::unique_ptr<NetworkShape>> _shapes;
static const QString NO_TEXTURE;
const QString& getTextureName(MapChannel channel);
void setTextures(const QVariantMap& textureMap);
// The model asset created from this NetworkGeometry
// model::AssetPointer _asset;
const bool& isOriginal() const { return _isOriginal; }
// cache for isLoadedWithTextures()
mutable bool _isLoadedWithTextures = false;
mutable bool _hasTransparentTextures = false;
};
/// Reads geometry in a worker thread.
class GeometryReader : public QObject, public QRunnable {
Q_OBJECT
public:
GeometryReader(const QUrl& url, const QByteArray& data, const QVariantHash& mapping);
virtual void run();
signals:
void onSuccess(FBXGeometry* geometry);
void onError(int error, QString str);
private:
QUrl _url;
QByteArray _data;
QVariantHash _mapping;
};
// Helpers for the ctors
QUrl getTextureUrl(const QUrl& baseUrl, const FBXTexture& fbxTexture);
model::TextureMapPointer fetchTextureMap(const QUrl& baseUrl, const FBXTexture& fbxTexture,
TextureType type, MapChannel channel);
model::TextureMapPointer fetchTextureMap(const QUrl& url, TextureType type, MapChannel channel);
Transform _albedoTransform;
Transform _lightmapTransform;
vec2 _lightmapParams;
bool _isOriginal { true };
};
class NetworkShape {
public:
int _meshID{ -1 };
int _partID{ -1 };
int _materialID{ -1 };
NetworkShape(int mesh, int part, int material) : meshID { mesh }, partID { part }, materialID { material } {}
int meshID { -1 };
int partID { -1 };
int materialID { -1 };
};
class NetworkMaterial {
public:
model::MaterialPointer _material;
QString emissiveTextureName;
QSharedPointer<NetworkTexture> emissiveTexture;
QString albedoTextureName;
QSharedPointer<NetworkTexture> albedoTexture;
QString normalTextureName;
QSharedPointer<NetworkTexture> normalTexture;
QString roughnessTextureName;
QSharedPointer<NetworkTexture> roughnessTexture;
QString metallicTextureName;
QSharedPointer<NetworkTexture> metallicTexture;
QString occlusionTextureName;
QSharedPointer<NetworkTexture> occlusionTexture;
QString lightmapTextureName;
QSharedPointer<NetworkTexture> lightmapTexture;
};
/// The state associated with a single mesh.
class NetworkMesh {
public:
model::MeshPointer _mesh;
};
#endif // hifi_GeometryCache_h
#endif // hifi_ModelCache_h
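A rough sketch of how a caller could drive the reworked load flow declared above. It assumes DependencyManager::get<ModelCache>()->getGeometry(...) hands back the NetworkGeometry::Pointer declared in this header; that return type, and the URL used, are assumptions made for illustration.
// Hypothetical caller-side sketch: request a geometry and react once it has loaded.
auto networkGeometry = DependencyManager::get<ModelCache>()->getGeometry(QUrl("atp:/models/example.fbx"));
// captured by value to keep the NetworkGeometry alive until the one-shot signal fires
QObject::connect(networkGeometry.get(), &NetworkGeometry::finished, [networkGeometry](bool success) {
    if (!success || !networkGeometry->isLoaded()) {
        return;
    }
    const Geometry::Pointer& geometry = networkGeometry->getGeometry();
    const FBXGeometry& fbxGeometry = geometry->getGeometry();  // only valid once loaded
    Q_UNUSED(fbxGeometry);
});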

View file

@ -111,7 +111,7 @@ Box Mesh::evalPartBound(int partNum) const {
return box;
}
Box Mesh::evalPartBounds(int partStart, int partEnd, Boxes& bounds) const {
Box Mesh::evalPartsBound(int partStart, int partEnd) const {
Box totalBound;
auto part = _partBuffer.cbegin<Part>() + partStart;
auto partItEnd = _partBuffer.cbegin<Part>() + partEnd;

View file

@ -108,9 +108,9 @@ public:
// evaluate the bounding box of A part
Box evalPartBound(int partNum) const;
// evaluate the bounding boxes of the parts in the range [start, end[ and fill the bounds parameter
// the returned box is the bounding box of ALL the evaluated part bounds.
Box evalPartBounds(int partStart, int partEnd, Boxes& bounds) const;
// evaluate the bounding boxes of the parts in the range [start, end)
// the returned box is the bounding box of ALL the evaluated part bounds.
Box evalPartsBound(int partStart, int partEnd) const;
static gpu::Primitive topologyToPrimitive(Topology topo) { return static_cast<gpu::Primitive>(topo); }
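A tiny hedged sketch of the new call. 'mesh' and 'partCount' are assumed to exist in the caller; reading the range as half-open matches the iterator arithmetic in the implementation above, but is an inference rather than something stated in this hunk.
// Hypothetical sketch: bound a run of parts with the new signature (no Boxes out-parameter).
Box allPartsBound = mesh.evalPartsBound(0, partCount);  // combined bound of parts [0, partCount)
Box firstPartBound = mesh.evalPartBound(0);             // bound of a single part, unchanged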

View file

@ -18,26 +18,34 @@ using namespace gpu;
Material::Material() :
_key(0),
_schemaBuffer(),
_textureMaps() {
// only if created from nothing shall we create the Buffer to store the properties
Schema schema;
_schemaBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(Schema), (const gpu::Byte*) &schema));
_textureMaps()
{
// created from nothing: create the Buffer to store the properties
Schema schema;
_schemaBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(Schema), (const gpu::Byte*) &schema));
}
Material::Material(const Material& material) :
_key(material._key),
_schemaBuffer(material._schemaBuffer),
_textureMaps(material._textureMaps) {
_textureMaps(material._textureMaps)
{
// copied: create the Buffer to store the properties, avoid holding a ref to the old Buffer
Schema schema;
_schemaBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(Schema), (const gpu::Byte*) &schema));
_schemaBuffer.edit<Schema>() = material._schemaBuffer.get<Schema>();
}
Material& Material::operator= (const Material& material) {
_key = (material._key);
_schemaBuffer = (material._schemaBuffer);
_textureMaps = (material._textureMaps);
// copied: create the Buffer to store the properties, avoid holding a ref to the old Buffer
Schema schema;
_schemaBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(Schema), (const gpu::Byte*) &schema));
_schemaBuffer.edit<Schema>() = material._schemaBuffer.get<Schema>();
return (*this);
}
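The copy constructor and assignment operator above deliberately allocate a fresh gpu::Buffer instead of sharing the source's, so two materials never alias the same uniform data. A small standalone illustration (a sketch, not part of the diff):
// Hypothetical sketch: edits to a copied material must not leak back into the original.
model::Material original;
original.setMetallic(1.0f);
model::Material copy(original);   // copy gets its own schema buffer, seeded from 'original'
copy.setMetallic(0.0f);           // edits only copy's buffer; 'original' still reads metallic == 1.0f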
@ -83,41 +91,48 @@ void Material::setMetallic(float metallic) {
void Material::setTextureMap(MapChannel channel, const TextureMapPointer& textureMap) {
if (textureMap) {
_key.setMapChannel(channel, (true));
if (channel == MaterialKey::ALBEDO_MAP) {
// clear the previous flags whatever they were:
_key.setOpacityMaskMap(false);
_key.setTranslucentMap(false);
if (textureMap->useAlphaChannel() && textureMap->isDefined() && textureMap->getTextureView().isValid()) {
auto usage = textureMap->getTextureView()._texture->getUsage();
if (usage.isAlpha()) {
// Texture has alpha: it is either just a mask or a true transparency channel
if (usage.isAlphaMask()) {
_key.setOpacityMaskMap(true);
_key.setTranslucentMap(false);
} else {
_key.setOpacityMaskMap(false);
_key.setTranslucentMap(true);
}
}
}
}
_textureMaps[channel] = textureMap;
} else {
_key.setMapChannel(channel, (false));
if (channel == MaterialKey::ALBEDO_MAP) {
_key.setOpacityMaskMap(false);
_key.setTranslucentMap(false);
}
_textureMaps.erase(channel);
}
_schemaBuffer.edit<Schema>()._key = (uint32)_key._flags.to_ulong();
if (channel == MaterialKey::ALBEDO_MAP) {
resetOpacityMap();
}
_schemaBuffer.edit<Schema>()._key = (uint32)_key._flags.to_ulong();
}
void Material::resetOpacityMap() const {
// Clear the previous flags
_key.setOpacityMaskMap(false);
_key.setTranslucentMap(false);
const auto& textureMap = getTextureMap(MaterialKey::ALBEDO_MAP);
if (textureMap &&
textureMap->useAlphaChannel() &&
textureMap->isDefined() &&
textureMap->getTextureView().isValid()) {
auto usage = textureMap->getTextureView()._texture->getUsage();
if (usage.isAlpha()) {
if (usage.isAlphaMask()) {
// Texture has alpha, but it is just a mask
_key.setOpacityMaskMap(true);
_key.setTranslucentMap(false);
} else {
// Texture has alpha, it is a true translucency channel
_key.setOpacityMaskMap(false);
_key.setTranslucentMap(true);
}
}
}
_schemaBuffer.edit<Schema>()._key = (uint32)_key._flags.to_ulong();
}
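Because the albedo texture's alpha usage is only known after the texture has actually loaded, the key cannot be finalized inside setTextureMap(); resetOpacityMap() re-derives it later without touching the maps. A minimal sketch of the deferred call, assuming 'material' is a model::MaterialPointer whose albedo map was created with setUseAlphaChannel(true) before the texture data was available:
// Hypothetical sketch: after the albedo texture finishes downloading and decoding,
// re-derive the translucent/opacity-mask flags from the now-known texture usage.
material->resetOpacityMap();  // updates the key and schema buffer without changing any texture map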

View file

@ -291,15 +291,17 @@ public:
const TextureMaps& getTextureMaps() const { return _textureMaps; }
const TextureMapPointer getTextureMap(MapChannel channel) const;
// Albedo maps cannot have opacity detected until they are loaded
// This method allows const changing of the key/schemaBuffer without touching the map
void resetOpacityMap() const;
// conversion from legacy material properties to PBR equivalent
static float shininessToRoughness(float shininess) { return 1.0f - shininess / 100.0f; }
protected:
MaterialKey _key;
UniformBufferView _schemaBuffer;
private:
mutable MaterialKey _key;
mutable UniformBufferView _schemaBuffer;
TextureMaps _textureMaps;
};
typedef std::shared_ptr< Material > MaterialPointer;

View file

@ -228,7 +228,8 @@ bool AssetClient::getAsset(const QString& hash, DataOffset start, DataOffset end
nodeList->sendPacket(std::move(packet), *assetServer);
_pendingRequests[assetServer][messageID] = { callback, progressCallback };
_pendingRequests[assetServer][messageID] = { QSharedPointer<ReceivedMessage>(), callback, progressCallback };
return true;
} else {
@ -326,6 +327,9 @@ void AssetClient::handleAssetGetReply(QSharedPointer<ReceivedMessage> message, S
if (requestIt != messageCallbackMap.end()) {
auto& callbacks = requestIt->second;
// Store message in case we need to disconnect from it later.
callbacks.message = message;
if (message->isComplete()) {
callbacks.completeCallback(true, error, message->readAll());
} else {
@ -550,6 +554,12 @@ void AssetClient::handleNodeKilled(SharedNodePointer node) {
auto messageMapIt = _pendingRequests.find(node);
if (messageMapIt != _pendingRequests.end()) {
for (const auto& value : messageMapIt->second) {
auto& message = value.second.message;
if (message) {
// Disconnect from all signals emitted by the pending message
disconnect(message.data(), nullptr, this, nullptr);
}
value.second.completeCallback(false, AssetServerError::NoError, QByteArray());
}
messageMapIt->second.clear();
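For context, the callback record referenced above presumably now carries the ReceivedMessage alongside the two callbacks. The struct name and member types below are assumptions; the real definition lives in AssetClient.h, which is not shown in this hunk.
// Assumed shape of the per-request record stored in _pendingRequests:
struct GetAssetRequestCallbacks {                      // hypothetical name
    QSharedPointer<ReceivedMessage> message;           // retained so its signals can be disconnected on node kill
    ReceivedAssetCallback completeCallback;            // invoked with (responseReceived, serverError, data)
    ProgressCallback progressCallback;                 // invoked as partial data arrives
};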

Some files were not shown because too many files have changed in this diff.