Mirror of https://github.com/JulianGro/overte.git (synced 2025-04-25 17:14:59 +02:00)

Commit 64f6a59bb3: Merge remote-tracking branch 'upstream/master' into gcc53

46 changed files with 4756 additions and 483 deletions
@@ -14,11 +14,9 @@ Script.load("edit.js");
Script.load("examples.js");
Script.load("selectAudioDevice.js");
Script.load("notifications.js");
Script.load("users.js");
Script.load("controllers/handControllerGrab.js");
Script.load("controllers/squeezeHands.js");
Script.load("grab.js");
Script.load("directory.js");
Script.load("dialTone.js");
// Script.load("attachedEntitiesManager.js");
Script.load("depthReticle.js");
examples/libraries/jasmine/hifi-boot.js (new file, 53 lines)
@@ -0,0 +1,53 @@
(function() {
    function ConsoleReporter(options) {
        this.jasmineStarted = function (obj) {
            print("jasmineStarted: numSpecs = " + obj.totalSpecsDefined);
        };
        this.jasmineDone = function (obj) {
            print("jasmineDone");
        };
        this.suiteStarted = function(obj) {
            print("suiteStarted: \"" + obj.fullName + "\"");
        };
        this.suiteDone = function(obj) {
            print("suiteDone: \"" + obj.fullName + "\" " + obj.status);
        };
        this.specStarted = function(obj) {
            print("specStarted: \"" + obj.fullName + "\"");
        };
        this.specDone = function(obj) {
            print("specDone: \"" + obj.fullName + "\" " + obj.status);

            var i, l = obj.failedExpectations.length;
            for (i = 0; i < l; i++) {
                print(" " + obj.failedExpectations[i].message);
            }
        };
        return this;
    }

    setTimeout = Script.setTimeout;
    setInterval = Script.setInterval;
    clearTimeout = Script.clearTimeout;
    clearInterval = Script.clearInterval;

    var jasmine = jasmineRequire.core(jasmineRequire);

    var env = jasmine.getEnv();

    env.addReporter(new ConsoleReporter());

    var jasmineInterface = jasmineRequire.interface(jasmine, env);

    extend(this, jasmineInterface);

    function extend(destination, source) {
        for (var property in source) {
            destination[property] = source[property];
        }
        return destination;
    }

}());
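hifi-boot.js wires Jasmine into the Interface script engine: it aliases the script timers, builds a Jasmine environment with a print()-based ConsoleReporter, and copies the Jasmine interface (describe/it/expect/...) onto the including script's global object. A minimal sketch of a spec script that consumes it, mirroring the pattern used by examples/tests/avatarUnitTests.js further down (the spec bodies themselves are illustrative):

    Script.include("../libraries/jasmine/jasmine.js");
    Script.include("../libraries/jasmine/hifi-boot.js");

    // describe/it/expect come from the jasmineInterface copied in by hifi-boot.js.
    describe("example suite", function () {
        it("adds numbers", function () {
            expect(1 + 1).toEqual(2);
        });

        it("supports async specs via done()", function (done) {
            Script.setTimeout(function () {
                expect(true).toBe(true);
                done();
            }, 100);
        });
    });

    // Results are printed by the ConsoleReporter defined above.
    jasmine.getEnv().execute();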
examples/libraries/jasmine/jasmine.js (new file, 3458 lines)
File diff suppressed because it is too large.
@@ -355,20 +355,36 @@ ToolBar = function(x, y, direction, optionalPersistenceKey, optionalInitialPosit
        });
    }
};
that.windowDimensions = Controller.getViewportDimensions();

function clamp(value, min, max) {
    return Math.min(Math.max(value, min), max);
}

var recommendedRect = Controller.getRecommendedOverlayRect();
var recommendedDimmensions = { x: recommendedRect.width, y: recommendedRect.height };
that.windowDimensions = recommendedDimmensions; // Controller.getViewportDimensions();
that.origin = { x: recommendedRect.x, y: recommendedRect.y };
// Maybe fixme: Keeping the same percent of the window size isn't always the right thing.
// For example, maybe we want "keep the same percentage to whatever two edges are closest to the edge of screen".
// If we change that, the places to do so are onResizeViewport, save (maybe), and the initial move based on Settings, below.
that.onResizeViewport = function (newSize) { // Can be overridden or extended by clients.
    var fractionX = that.x / that.windowDimensions.x;
    var fractionY = that.y / that.windowDimensions.y;
    that.windowDimensions = newSize || Controller.getViewportDimensions();
    that.move(fractionX * that.windowDimensions.x, fractionY * that.windowDimensions.y);
    var recommendedRect = Controller.getRecommendedOverlayRect();
    var recommendedDimmensions = { x: recommendedRect.width, y: recommendedRect.height };
    var originRelativeX = (that.x - that.origin.x);
    var originRelativeY = (that.y - that.origin.y);
    var fractionX = clamp(originRelativeX / that.windowDimensions.x, 0, 1);
    var fractionY = clamp(originRelativeY / that.windowDimensions.y, 0, 1);
    that.windowDimensions = newSize || recommendedDimmensions;
    that.origin = { x: recommendedRect.x, y: recommendedRect.y };
    var newX = (fractionX * that.windowDimensions.x) + recommendedRect.x;
    var newY = (fractionY * that.windowDimensions.y) + recommendedRect.y;
    that.move(newX, newY);
};
if (optionalPersistenceKey) {
    this.fractionKey = optionalPersistenceKey + '.fraction';
    this.save = function () {
        var screenSize = Controller.getViewportDimensions();
        var recommendedRect = Controller.getRecommendedOverlayRect();
        var screenSize = { x: recommendedRect.width, y: recommendedRect.height };
        if (screenSize.x > 0 && screenSize.y > 0) {
            // Guard against invalid screen size that can occur at shut-down.
            var fraction = {x: that.x / screenSize.x, y: that.y / screenSize.y};
@@ -411,7 +427,9 @@ ToolBar = function(x, y, direction, optionalPersistenceKey, optionalInitialPosit
    that.move(that.dragOffsetX + event.x, that.dragOffsetY + event.y);
};
that.checkResize = function () { // Can be overridden or extended, but usually not. See onResizeViewport.
    var currentWindowSize = Controller.getViewportDimensions();
    var recommendedRect = Controller.getRecommendedOverlayRect();
    var currentWindowSize = { x: recommendedRect.width, y: recommendedRect.height };

    if ((currentWindowSize.x !== that.windowDimensions.x) || (currentWindowSize.y !== that.windowDimensions.y)) {
        that.onResizeViewport(currentWindowSize);
    }
@@ -434,7 +452,8 @@ ToolBar = function(x, y, direction, optionalPersistenceKey, optionalInitialPosit
}
if (this.fractionKey || optionalInitialPositionFunction) {
    var savedFraction = JSON.parse(Settings.getValue(this.fractionKey) || '0'); // getValue can answer empty string
    var screenSize = Controller.getViewportDimensions();
    var recommendedRect = Controller.getRecommendedOverlayRect();
    var screenSize = { x: recommendedRect.width, y: recommendedRect.height };
    if (savedFraction) {
        // If we have saved data, keep the toolbar at the same proportion of the screen width/height.
        that.move(savedFraction.x * screenSize.x, savedFraction.y * screenSize.y);
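The three hunks above move ToolBar positioning from raw viewport coordinates onto the recommended overlay rectangle: a position is stored as a clamped fraction of that rectangle and re-applied against the new rectangle whenever it changes. A standalone sketch of that mapping (the helper names here are illustrative, not part of the diff):

    function clamp(value, min, max) {
        return Math.min(Math.max(value, min), max);
    }

    // Keep a point at the same relative spot when the recommended overlay
    // rect changes, clamping the fraction to [0, 1] as onResizeViewport does.
    function remapToNewRect(point, oldRect, newRect) {
        var fractionX = clamp((point.x - oldRect.x) / oldRect.width, 0, 1);
        var fractionY = clamp((point.y - oldRect.y) / oldRect.height, 0, 1);
        return {
            x: newRect.x + fractionX * newRect.width,
            y: newRect.y + fractionY * newRect.height
        };
    }

    // Example: a toolbar at (200, 100) in a full 1920x1080 overlay keeps its
    // relative position when the overlay becomes the HMD's centered 2048x1188 rect.
    var moved = remapToNewRect({ x: 200, y: 100 },
                               { x: 0, y: 0, width: 1920, height: 1080 },
                               { x: 956, y: 0, width: 2048, height: 1188 });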
examples/tests/avatarUnitTests.js (new file, 59 lines)
@@ -0,0 +1,59 @@
Script.include("../libraries/jasmine/jasmine.js");
Script.include("../libraries/jasmine/hifi-boot.js");

// Art3mis
var DEFAULT_AVATAR_URL = "https://hifi-metaverse.s3-us-west-1.amazonaws.com/marketplace/contents/e76946cc-c272-4adf-9bb6-02cde0a4b57d/8fd984ea6fe1495147a3303f87fa6e23.fst?1460131758";

var ORIGIN = {x: 0, y: 0, z: 0};
var ONE_HUNDRED = {x: 100, y: 100, z: 100};
var ROT_IDENT = {x: 0, y: 0, z: 0, w: 1};

describe("MyAvatar", function () {

    // reload the avatar from scratch before each test.
    beforeEach(function (done) {
        MyAvatar.skeletonModelURL = DEFAULT_AVATAR_URL;

        // wait until we are finished loading
        var id = Script.setInterval(function () {
            if (MyAvatar.jointNames.length == 72) {
                // assume we are finished loading.
                Script.clearInterval(id);
                MyAvatar.position = ORIGIN;
                MyAvatar.orientation = ROT_IDENT;
                // give the avatar 1/2 a second to settle on the ground in the idle pose.
                Script.setTimeout(function () {
                    done();
                }, 500);
            }
        }, 500);
    });

    // makes the assumption that there is solid ground somewhat underneath the avatar.
    it("position and orientation getters", function () {
        var pos = MyAvatar.position;

        expect(Math.abs(pos.x)).toBeLessThan(0.1);
        expect(Math.abs(pos.y)).toBeLessThan(1.0);
        expect(Math.abs(pos.z)).toBeLessThan(0.1);

        var rot = MyAvatar.orientation;
        expect(Math.abs(rot.x)).toBeLessThan(0.01);
        expect(Math.abs(rot.y)).toBeLessThan(0.01);
        expect(Math.abs(rot.z)).toBeLessThan(0.01);
        expect(Math.abs(1 - rot.w)).toBeLessThan(0.01);
    });

    it("position and orientation setters", function (done) {
        MyAvatar.position = ONE_HUNDRED;
        Script.setTimeout(function () {
            expect(Vec3.length(Vec3.subtract(MyAvatar.position, ONE_HUNDRED))).toBeLessThan(0.1);
            done();
        }, 100);
    });

});

jasmine.getEnv().execute();
examples/tests/playaPerformanceTest.js (new file, 11 lines)
@@ -0,0 +1,11 @@
var qml = Script.resolvePath('playaPerformanceTest.qml');
qmlWindow = new OverlayWindow({
    title: 'Test Qml',
    source: qml,
    height: 320,
    width: 640,
    toolWindow: false,
    visible: true
});
examples/tests/playaPerformanceTest.qml (new file, 193 lines)
@@ -0,0 +1,193 @@
|
|||
import QtQuick 2.5
|
||||
import QtQuick.Controls 1.4
|
||||
|
||||
Rectangle {
|
||||
id: root
|
||||
width: parent ? parent.width : 100
|
||||
height: parent ? parent.height : 100
|
||||
|
||||
signal sendToScript(var message);
|
||||
property var values: [];
|
||||
property var host: AddressManager.hostname
|
||||
|
||||
|
||||
Component.onCompleted: {
|
||||
Window.domainChanged.connect(function(newDomain){
|
||||
if (newDomain !== root.host) {
|
||||
root.host = AddressManager.hostname;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
onHostChanged: {
|
||||
if (root.running) {
|
||||
if (host !== "Dreaming" && host !== "Playa") {
|
||||
return;
|
||||
}
|
||||
|
||||
console.log("PERF new domain " + host)
|
||||
if (host === "Dreaming") {
|
||||
AddressManager.handleLookupString("Playa");
|
||||
return;
|
||||
}
|
||||
|
||||
if (host === "Playa") {
|
||||
console.log("PERF starting timers and frame timing");
|
||||
// If we've arrived, start running the test
|
||||
FrameTimings.start();
|
||||
rotationTimer.start();
|
||||
stopTimer.start();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function startTest() {
|
||||
console.log("PERF startTest()");
|
||||
root.running = true
|
||||
console.log("PERF current host: " + AddressManager.hostname)
|
||||
// If we're already in playa, we need to go somewhere else...
|
||||
if ("Playa" === AddressManager.hostname) {
|
||||
console.log("PERF Navigating to dreaming")
|
||||
AddressManager.handleLookupString("Dreaming/0,0,0");
|
||||
} else {
|
||||
console.log("PERF Navigating to playa")
|
||||
AddressManager.handleLookupString("Playa");
|
||||
}
|
||||
}
|
||||
|
||||
function stopTest() {
|
||||
console.log("PERF stopTest()");
|
||||
root.running = false;
|
||||
stopTimer.stop();
|
||||
rotationTimer.stop();
|
||||
FrameTimings.finish();
|
||||
root.values = FrameTimings.getValues();
|
||||
AddressManager.handleLookupString("Dreaming/0,0,0");
|
||||
resultGraph.requestPaint();
|
||||
console.log("PERF Value Count: " + root.values.length);
|
||||
console.log("PERF Max: " + FrameTimings.max);
|
||||
console.log("PERF Min: " + FrameTimings.min);
|
||||
console.log("PERF Avg: " + FrameTimings.mean);
|
||||
console.log("PERF StdDev: " + FrameTimings.standardDeviation);
|
||||
}
|
||||
|
||||
function yaw(a) {
|
||||
var y = -Math.sin( a / 2.0 );
|
||||
var w = Math.cos( a / 2.0 );
|
||||
var l = Math.sqrt((y * y) + (w * w));
|
||||
return Qt.quaternion(w / l, 0, y / l, 0);
|
||||
}
|
||||
|
||||
function rotate() {
|
||||
MyAvatar.setOrientationVar(yaw(Date.now() / 1000));
|
||||
}
|
||||
|
||||
property bool running: false
|
||||
|
||||
Timer {
|
||||
id: stopTimer
|
||||
interval: 30 * 1000
|
||||
repeat: false
|
||||
running: false
|
||||
onTriggered: stopTest();
|
||||
}
|
||||
|
||||
Timer {
|
||||
id: rotationTimer
|
||||
interval: 100
|
||||
repeat: true
|
||||
running: false
|
||||
onTriggered: rotate();
|
||||
}
|
||||
|
||||
Row {
|
||||
id: row
|
||||
anchors { left: parent.left; right: parent.right; }
|
||||
spacing: 8
|
||||
Button {
|
||||
text: root.running ? "Stop" : "Run"
|
||||
onClicked: root.running ? stopTest() : startTest();
|
||||
}
|
||||
}
|
||||
|
||||
// Rectangle {
|
||||
// anchors { left: parent.left; right: parent.right; top: row.bottom; topMargin: 8; bottom: parent.bottom; }
|
||||
// //anchors.fill: parent
|
||||
// color: "#7fff0000"
|
||||
// }
|
||||
|
||||
// Return the maximum value from a set of values
|
||||
function vv(i, max) {
|
||||
var perValue = values.length / max;
|
||||
var start = Math.floor(perValue * i);
|
||||
var end = Math.min(values.length, Math.floor(start + perValue));
|
||||
var result = 0;
|
||||
for (var j = start; j <= end; ++j) {
|
||||
result = Math.max(result, values[j]);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
Canvas {
|
||||
id: resultGraph
|
||||
anchors { left: parent.left; right: parent.right; top: row.bottom; margins: 16; bottom: parent.bottom; }
|
||||
property real maxValue: 200;
|
||||
property real perFrame: 10000;
|
||||
property real k1: (5 / maxValue) * height;
|
||||
property real k2: (10 / maxValue) * height;
|
||||
property real k3: (100 / maxValue) * height;
|
||||
|
||||
onPaint: {
|
||||
var ctx = getContext("2d");
|
||||
if (values.length === 0) {
|
||||
ctx.fillStyle = Qt.rgba(1, 0, 0, 1);
|
||||
ctx.fillRect(0, 0, width, height);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
//ctx.setTransform(1, 0, 0, -1, 0, 0);
|
||||
ctx.fillStyle = Qt.rgba(0, 0, 0, 1);
|
||||
ctx.fillRect(0, 0, width, height);
|
||||
|
||||
ctx.strokeStyle= "gray";
|
||||
ctx.lineWidth="1";
|
||||
ctx.beginPath();
|
||||
for (var i = 0; i < width; ++i) {
|
||||
var value = vv(i, width); //values[Math.min(i, values.length - 1)];
|
||||
value /= 10000;
|
||||
value /= maxValue;
|
||||
ctx.moveTo(i, height);
|
||||
ctx.lineTo(i, height - (height * value));
|
||||
}
|
||||
ctx.stroke();
|
||||
|
||||
ctx.strokeStyle= "green";
|
||||
ctx.lineWidth="2";
|
||||
ctx.beginPath();
|
||||
var lineHeight = height - k1;
|
||||
ctx.moveTo(0, lineHeight);
|
||||
ctx.lineTo(width, lineHeight);
|
||||
ctx.stroke();
|
||||
|
||||
ctx.strokeStyle= "yellow";
|
||||
ctx.lineWidth="2";
|
||||
ctx.beginPath();
|
||||
lineHeight = height - k2;
|
||||
ctx.moveTo(0, lineHeight);
|
||||
ctx.lineTo(width, lineHeight);
|
||||
ctx.stroke();
|
||||
|
||||
ctx.strokeStyle= "red";
|
||||
ctx.lineWidth="2";
|
||||
ctx.beginPath();
|
||||
lineHeight = height - k3;
|
||||
ctx.moveTo(0, lineHeight);
|
||||
ctx.lineTo(width, lineHeight);
|
||||
ctx.stroke();
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
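playaPerformanceTest.qml spins the avatar by building a yaw-only quaternion in its yaw() helper and handing it to MyAvatar.setOrientationVar(), the invokable this commit adds to MyAvatar. The same rotation expressed from an Interface script, as a sketch only (it assumes quatFromVariant accepts a plain {x, y, z, w} object):

    // Quaternion for a rotation of `angle` radians about the Y (up) axis,
    // using the same sign convention as the QML yaw() function above.
    function yawQuat(angle) {
        var halfAngle = angle / 2.0;
        return { x: 0, y: -Math.sin(halfAngle), z: 0, w: Math.cos(halfAngle) };
    }

    // Rotate slowly, like the QML rotationTimer firing every 100 ms.
    var timer = Script.setInterval(function () {
        MyAvatar.setOrientationVar(yawQuat(Date.now() / 1000));
    }, 100);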
@ -23,6 +23,8 @@ Hifi.AvatarInputs {
|
|||
readonly property int mirrorWidth: 265
|
||||
readonly property int iconSize: 24
|
||||
readonly property int iconPadding: 5
|
||||
|
||||
readonly property bool shouldReposition: true
|
||||
|
||||
Settings {
|
||||
category: "Overlay.AvatarInputs"
|
||||
|
|
|
@ -21,8 +21,11 @@ FocusScope {
|
|||
objectName: "desktop"
|
||||
anchors.fill: parent
|
||||
|
||||
onHeightChanged: d.repositionAll();
|
||||
onWidthChanged: d.repositionAll();
|
||||
property rect recommendedRect: rect(0,0,0,0);
|
||||
|
||||
onHeightChanged: d.handleSizeChanged();
|
||||
|
||||
onWidthChanged: d.handleSizeChanged();
|
||||
|
||||
// Controls and windows can trigger this signal to ensure the desktop becomes visible
|
||||
// when they're opened.
|
||||
|
@ -50,6 +53,20 @@ FocusScope {
|
|||
QtObject {
|
||||
id: d
|
||||
|
||||
function handleSizeChanged() {
|
||||
var oldRecommendedRect = recommendedRect;
|
||||
var newRecommendedRectJS = Controller.getRecommendedOverlayRect();
|
||||
var newRecommendedRect = Qt.rect(newRecommendedRectJS.x, newRecommendedRectJS.y,
|
||||
newRecommendedRectJS.width,
|
||||
newRecommendedRectJS.height);
|
||||
|
||||
if (oldRecommendedRect != Qt.rect(0,0,0,0)
|
||||
&& oldRecommendedRect != newRecommendedRect) {
|
||||
d.repositionAll();
|
||||
}
|
||||
recommendedRect = newRecommendedRect;
|
||||
}
|
||||
|
||||
function findChild(item, name) {
|
||||
for (var i = 0; i < item.children.length; ++i) {
|
||||
if (item.children[i].objectName === name) {
|
||||
|
@ -202,12 +219,42 @@ FocusScope {
|
|||
// }
|
||||
}
|
||||
|
||||
function getRepositionChildren(predicate) {
|
||||
var currentWindows = [];
|
||||
if (!desktop) {
|
||||
console.log("Could not find desktop");
|
||||
return currentWindows;
|
||||
}
|
||||
|
||||
for (var i = 0; i < desktop.children.length; ++i) {
|
||||
var child = desktop.children[i];
|
||||
if (child.shouldReposition === true && (!predicate || predicate(child))) {
|
||||
currentWindows.push(child)
|
||||
}
|
||||
}
|
||||
return currentWindows;
|
||||
}
|
||||
|
||||
function repositionAll() {
|
||||
var oldRecommendedRect = recommendedRect;
|
||||
var oldRecommendedDimmensions = { x: oldRecommendedRect.width, y: oldRecommendedRect.height };
|
||||
var newRecommendedRect = Controller.getRecommendedOverlayRect();
|
||||
var newRecommendedDimmensions = { x: newRecommendedRect.width, y: newRecommendedRect.height };
|
||||
var windows = d.getTopLevelWindows();
|
||||
for (var i = 0; i < windows.length; ++i) {
|
||||
reposition(windows[i]);
|
||||
var targetWindow = windows[i];
|
||||
if (targetWindow.visible) {
|
||||
repositionWindow(targetWindow, true, oldRecommendedRect, oldRecommendedDimmensions, newRecommendedRect, newRecommendedDimmensions);
|
||||
}
|
||||
}
|
||||
|
||||
// also reposition the other children that aren't top level windows but want to be repositioned
|
||||
var otherChildren = d.getRepositionChildren();
|
||||
for (var i = 0; i < otherChildren.length; ++i) {
|
||||
var child = otherChildren[i];
|
||||
repositionWindow(child, true, oldRecommendedRect, oldRecommendedDimmensions, newRecommendedRect, newRecommendedDimmensions);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -232,38 +279,56 @@ FocusScope {
|
|||
targetWindow.focus = true;
|
||||
}
|
||||
|
||||
reposition(targetWindow);
|
||||
var oldRecommendedRect = recommendedRect;
|
||||
var oldRecommendedDimmensions = { x: oldRecommendedRect.width, y: oldRecommendedRect.height };
|
||||
var newRecommendedRect = Controller.getRecommendedOverlayRect();
|
||||
var newRecommendedDimmensions = { x: newRecommendedRect.width, y: newRecommendedRect.height };
|
||||
repositionWindow(targetWindow, false, oldRecommendedRect, oldRecommendedDimmensions, newRecommendedRect, newRecommendedDimmensions);
|
||||
|
||||
showDesktop();
|
||||
}
|
||||
|
||||
function reposition(item) {
|
||||
function repositionWindow(targetWindow, forceReposition,
|
||||
oldRecommendedRect, oldRecommendedDimmensions, newRecommendedRect, newRecommendedDimmensions) {
|
||||
|
||||
if (desktop.width === 0 || desktop.height === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
var targetWindow = d.getDesktopWindow(item);
|
||||
if (!targetWindow) {
|
||||
console.warn("Could not find top level window for " + item);
|
||||
return;
|
||||
}
|
||||
|
||||
var recommended = Controller.getRecommendedOverlayRect();
|
||||
var maxX = recommended.x + recommended.width;
|
||||
var maxY = recommended.y + recommended.height;
|
||||
var newPosition = Qt.vector2d(targetWindow.x, targetWindow.y);
|
||||
// If the window is completely offscreen, reposition it
|
||||
if ((targetWindow.x > desktop.width || (targetWindow.x + targetWindow.width) < 0) ||
|
||||
(targetWindow.y > desktop.height || (targetWindow.y + targetWindow.height) < 0)) {
|
||||
|
||||
// if we asked to force reposition, or if the window is completely outside of the recommended rectangle, reposition it
|
||||
if (forceReposition || (targetWindow.x > maxX || (targetWindow.x + targetWindow.width) < recommended.x) ||
|
||||
(targetWindow.y > maxY || (targetWindow.y + targetWindow.height) < recommended.y)) {
|
||||
newPosition.x = -1
|
||||
newPosition.y = -1
|
||||
}
|
||||
|
||||
|
||||
if (newPosition.x === -1 && newPosition.y === -1) {
|
||||
// Set initial window position
|
||||
// var minPosition = Qt.vector2d(-windowRect.x, -windowRect.y);
|
||||
// var maxPosition = Qt.vector2d(desktop.width - windowRect.width, desktop.height - windowRect.height);
|
||||
// newPosition = Utils.clampVector(newPosition, minPosition, maxPosition);
|
||||
// newPosition = Utils.randomPosition(minPosition, maxPosition);
|
||||
newPosition = Qt.vector2d(desktop.width / 2 - targetWindow.width / 2,
|
||||
desktop.height / 2 - targetWindow.height / 2);
|
||||
var originRelativeX = (targetWindow.x - oldRecommendedRect.x);
|
||||
var originRelativeY = (targetWindow.y - oldRecommendedRect.y);
|
||||
if (isNaN(originRelativeX)) {
|
||||
originRelativeX = 0;
|
||||
}
|
||||
if (isNaN(originRelativeY)) {
|
||||
originRelativeY = 0;
|
||||
}
|
||||
var fractionX = Utils.clamp(originRelativeX / oldRecommendedDimmensions.x, 0, 1);
|
||||
var fractionY = Utils.clamp(originRelativeY / oldRecommendedDimmensions.y, 0, 1);
|
||||
|
||||
var newX = (fractionX * newRecommendedDimmensions.x) + newRecommendedRect.x;
|
||||
var newY = (fractionY * newRecommendedDimmensions.y) + newRecommendedRect.y;
|
||||
|
||||
newPosition = Qt.vector2d(newX, newY);
|
||||
}
|
||||
targetWindow.x = newPosition.x;
|
||||
targetWindow.y = newPosition.y;
|
||||
|
|
|
@ -147,6 +147,8 @@
|
|||
#include "Util.h"
|
||||
#include "InterfaceParentFinder.h"
|
||||
|
||||
#include "FrameTimingsScriptingInterface.h"
|
||||
|
||||
// On Windows PC, NVidia Optimus laptop, we want to enable NVIDIA GPU
|
||||
// FIXME seems to be broken.
|
||||
#if defined(Q_OS_WIN)
|
||||
|
@ -193,12 +195,7 @@ static const uint32_t INVALID_FRAME = UINT32_MAX;
|
|||
|
||||
static const float PHYSICS_READY_RANGE = 3.0f; // how far from avatar to check for entities that aren't ready for simulation
|
||||
|
||||
#ifndef __APPLE__
|
||||
static const QString DESKTOP_LOCATION = QStandardPaths::writableLocation(QStandardPaths::DesktopLocation);
|
||||
#else
|
||||
// Temporary fix to Qt bug: http://stackoverflow.com/questions/16194475
|
||||
static const QString DESKTOP_LOCATION = QStandardPaths::writableLocation(QStandardPaths::DesktopLocation).append("/script.js");
|
||||
#endif
|
||||
|
||||
Setting::Handle<int> maxOctreePacketsPerSecond("maxOctreePPS", DEFAULT_MAX_OCTREE_PPS);
|
||||
|
||||
|
@ -1233,6 +1230,9 @@ void Application::cleanupBeforeQuit() {
|
|||
}
|
||||
|
||||
Application::~Application() {
|
||||
_entityClipboard->eraseAllOctreeElements();
|
||||
_entityClipboard.reset();
|
||||
|
||||
EntityTreePointer tree = getEntities()->getTree();
|
||||
tree->setSimulation(nullptr);
|
||||
|
||||
|
@ -1242,7 +1242,7 @@ Application::~Application() {
|
|||
_physicsEngine->setCharacterController(nullptr);
|
||||
|
||||
// remove avatars from physics engine
|
||||
DependencyManager::get<AvatarManager>()->clearOtherAvatars();
|
||||
DependencyManager::get<AvatarManager>()->clearAllAvatars();
|
||||
VectorOfMotionStates motionStates;
|
||||
DependencyManager::get<AvatarManager>()->getObjectsToRemoveFromPhysics(motionStates);
|
||||
_physicsEngine->removeObjects(motionStates);
|
||||
|
@ -1337,6 +1337,8 @@ void Application::initializeGL() {
|
|||
InfoView::show(INFO_HELP_PATH, true);
|
||||
}
|
||||
|
||||
FrameTimingsScriptingInterface _frameTimingsScriptingInterface;
|
||||
|
||||
extern void setupPreferences();
|
||||
|
||||
void Application::initializeUi() {
|
||||
|
@ -1381,6 +1383,8 @@ void Application::initializeUi() {
|
|||
rootContext->setContextProperty("Messages", DependencyManager::get<MessagesClient>().data());
|
||||
rootContext->setContextProperty("Recording", DependencyManager::get<RecordingScriptingInterface>().data());
|
||||
rootContext->setContextProperty("Preferences", DependencyManager::get<Preferences>().data());
|
||||
rootContext->setContextProperty("AddressManager", DependencyManager::get<AddressManager>().data());
|
||||
rootContext->setContextProperty("FrameTimings", &_frameTimingsScriptingInterface);
|
||||
|
||||
rootContext->setContextProperty("TREE_SCALE", TREE_SCALE);
|
||||
rootContext->setContextProperty("Quat", new Quat());
|
||||
|
@ -1424,6 +1428,7 @@ void Application::initializeUi() {
|
|||
rootContext->setContextProperty("Reticle", getApplicationCompositor().getReticleInterface());
|
||||
|
||||
rootContext->setContextProperty("ApplicationCompositor", &getApplicationCompositor());
|
||||
|
||||
|
||||
_glWidget->installEventFilter(offscreenUi.data());
|
||||
offscreenUi->setMouseTranslator([=](const QPointF& pt) {
|
||||
|
@ -1466,9 +1471,9 @@ void Application::initializeUi() {
|
|||
});
|
||||
}
|
||||
|
||||
|
||||
void Application::paintGL() {
|
||||
updateHeartbeat();
|
||||
|
||||
// Some plugins process message events, potentially leading to
|
||||
// re-entering a paint event. don't allow further processing if this
|
||||
// happens
|
||||
|
@ -1486,6 +1491,7 @@ void Application::paintGL() {
|
|||
_frameCount++;
|
||||
_frameCounter.increment();
|
||||
|
||||
auto lastPaintBegin = usecTimestampNow();
|
||||
PROFILE_RANGE_EX(__FUNCTION__, 0xff0000ff, (uint64_t)_frameCount);
|
||||
PerformanceTimer perfTimer("paintGL");
|
||||
|
||||
|
@ -1738,6 +1744,9 @@ void Application::paintGL() {
|
|||
batch.resetStages();
|
||||
});
|
||||
}
|
||||
|
||||
uint64_t lastPaintDuration = usecTimestampNow() - lastPaintBegin;
|
||||
_frameTimingsScriptingInterface.addValue(lastPaintDuration);
|
||||
}
|
||||
|
||||
void Application::runTests() {
|
||||
|
@ -2672,8 +2681,6 @@ void Application::idle(uint64_t now) {
|
|||
_overlayConductor.setEnabled(Menu::getInstance()->isOptionChecked(MenuOption::Overlays));
|
||||
}
|
||||
|
||||
|
||||
|
||||
// If the offscreen Ui has something active that is NOT the root, then assume it has keyboard focus.
|
||||
auto offscreenUi = DependencyManager::get<OffscreenUi>();
|
||||
if (_keyboardDeviceHasFocus && offscreenUi && offscreenUi->getWindow()->activeFocusItem() != offscreenUi->getRootItem()) {
|
||||
|
@ -4885,19 +4892,44 @@ QRect Application::getRenderingGeometry() const {
|
|||
}
|
||||
|
||||
glm::uvec2 Application::getUiSize() const {
|
||||
return getActiveDisplayPlugin()->getRecommendedUiSize();
|
||||
static const uint MIN_SIZE = 1;
|
||||
glm::uvec2 result(MIN_SIZE);
|
||||
if (_displayPlugin) {
|
||||
result = getActiveDisplayPlugin()->getRecommendedUiSize();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
QRect Application::getRecommendedOverlayRect() const {
|
||||
auto uiSize = getUiSize();
|
||||
QRect result(0, 0, uiSize.x, uiSize.y);
|
||||
if (_displayPlugin) {
|
||||
result = getActiveDisplayPlugin()->getRecommendedOverlayRect();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
QSize Application::getDeviceSize() const {
|
||||
return fromGlm(getActiveDisplayPlugin()->getRecommendedRenderSize());
|
||||
static const int MIN_SIZE = 1;
|
||||
QSize result(MIN_SIZE, MIN_SIZE);
|
||||
if (_displayPlugin) {
|
||||
result = fromGlm(getActiveDisplayPlugin()->getRecommendedRenderSize());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
bool Application::isThrottleRendering() const {
|
||||
return getActiveDisplayPlugin()->isThrottled();
|
||||
if (_displayPlugin) {
|
||||
return getActiveDisplayPlugin()->isThrottled();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool Application::hasFocus() const {
|
||||
return getActiveDisplayPlugin()->hasFocus();
|
||||
if (_displayPlugin) {
|
||||
return getActiveDisplayPlugin()->hasFocus();
|
||||
}
|
||||
return (QApplication::activeWindow() != nullptr);
|
||||
}
|
||||
|
||||
glm::vec2 Application::getViewportDimensions() const {
|
||||
|
|
|
@ -117,6 +117,7 @@ public:
|
|||
QRect getRenderingGeometry() const;
|
||||
|
||||
glm::uvec2 getUiSize() const;
|
||||
QRect getRecommendedOverlayRect() const;
|
||||
QSize getDeviceSize() const;
|
||||
bool hasFocus() const;
|
||||
|
||||
|
|
interface/src/FrameTimingsScriptingInterface.cpp (new file, 53 lines)
@@ -0,0 +1,53 @@
|
|||
//
|
||||
// Created by Bradley Austin Davis on 2016/04/04
|
||||
// Copyright 2013-2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#include "FrameTimingsScriptingInterface.h"
|
||||
|
||||
#include <TextureCache.h>
|
||||
|
||||
void FrameTimingsScriptingInterface::start() {
|
||||
_values.clear();
|
||||
DependencyManager::get<TextureCache>()->setUnusedResourceCacheSize(0);
|
||||
_values.reserve(8192);
|
||||
_active = true;
|
||||
}
|
||||
|
||||
void FrameTimingsScriptingInterface::addValue(uint64_t value) {
|
||||
if (_active) {
|
||||
_values.push_back(value);
|
||||
}
|
||||
}
|
||||
|
||||
void FrameTimingsScriptingInterface::finish() {
|
||||
_active = false;
|
||||
uint64_t total = 0;
|
||||
_min = std::numeric_limits<uint64_t>::max();
|
||||
_max = std::numeric_limits<uint64_t>::lowest();
|
||||
size_t count = _values.size();
|
||||
for (size_t i = 0; i < count; ++i) {
|
||||
const uint64_t& value = _values[i];
|
||||
_max = std::max(_max, value);
|
||||
_min = std::min(_min, value);
|
||||
total += value;
|
||||
}
|
||||
_mean = (float)total / (float)count;
|
||||
float deviationTotal = 0;
|
||||
for (size_t i = 0; i < count; ++i) {
|
||||
float deviation = _values[i] - _mean;
|
||||
deviationTotal += deviation*deviation;
|
||||
}
|
||||
_stdDev = sqrt(deviationTotal / (float)count);
|
||||
}
|
||||
|
||||
QVariantList FrameTimingsScriptingInterface::getValues() const {
|
||||
QVariantList result;
|
||||
for (quint64 v : _values) {
|
||||
result << QVariant(v);
|
||||
}
|
||||
return result;
|
||||
}
|
interface/src/FrameTimingsScriptingInterface.h (new file, 38 lines)
@@ -0,0 +1,38 @@
|
|||
//
|
||||
// Created by Bradley Austin Davis on 2016/04/04
|
||||
// Copyright 2013-2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#pragma once
|
||||
#include <stdint.h>
|
||||
#include <QtCore/QObject>
|
||||
|
||||
class FrameTimingsScriptingInterface : public QObject {
|
||||
Q_OBJECT
|
||||
Q_PROPERTY(float mean READ getMean CONSTANT)
|
||||
Q_PROPERTY(float max READ getMax CONSTANT)
|
||||
Q_PROPERTY(float min READ getMin CONSTANT)
|
||||
Q_PROPERTY(float standardDeviation READ getStandardDeviation CONSTANT)
|
||||
public:
|
||||
Q_INVOKABLE void start();
|
||||
Q_INVOKABLE void addValue(uint64_t value);
|
||||
Q_INVOKABLE void finish();
|
||||
Q_INVOKABLE QVariantList getValues() const;
|
||||
|
||||
|
||||
uint64_t getMax() const { return _max; }
|
||||
uint64_t getMin() const { return _min; }
|
||||
float getStandardDeviation() const { return _stdDev; }
|
||||
float getMean() const { return _mean; }
|
||||
|
||||
protected:
|
||||
std::vector<uint64_t> _values;
|
||||
bool _active { false };
|
||||
uint64_t _max { 0 };
|
||||
uint64_t _min { 0 };
|
||||
float _stdDev { 0 };
|
||||
float _mean { 0 };
|
||||
};
|
|
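FrameTimingsScriptingInterface is exposed to the QML root context as "FrameTimings" (see the Application::initializeUi() hunk above), and Application::paintGL() feeds it one paint duration in microseconds per frame via addValue(). A minimal sketch of the call sequence, written as plain JavaScript and mirroring examples/tests/playaPerformanceTest.qml, where the 30-second delay comes from a Timer and output goes to console.log:

    // Record roughly 30 seconds of frame timings, then report the
    // statistics the interface computes in finish().
    FrameTimings.start();

    Script.setTimeout(function () {
        FrameTimings.finish();
        var values = FrameTimings.getValues();   // per-frame paint durations, microseconds
        print("frames: " + values.length);
        print("min:    " + FrameTimings.min);
        print("max:    " + FrameTimings.max);
        print("mean:   " + FrameTimings.mean);
        print("stddev: " + FrameTimings.standardDeviation);
    }, 30 * 1000);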
@ -14,7 +14,6 @@
|
|||
#include <mutex>
|
||||
|
||||
#include <QElapsedTimer>
|
||||
#include <gpu/Context.h>
|
||||
#include <NumericalConstants.h>
|
||||
#include <DependencyManager.h>
|
||||
#include <GeometryCache.h>
|
||||
|
@ -42,26 +41,11 @@ static const float TAU = 6.28318530717958f;
|
|||
//static const float MILKY_WAY_RATIO = 0.4f;
|
||||
static const char* UNIFORM_TIME_NAME = "iGlobalTime";
|
||||
|
||||
|
||||
|
||||
Stars::Stars() {
|
||||
}
|
||||
|
||||
Stars::~Stars() {
|
||||
}
|
||||
|
||||
// Produce a random float value between 0 and 1
|
||||
static float frand() {
|
||||
return (float)rand() / (float)RAND_MAX;
|
||||
}
|
||||
|
||||
// Produce a random radian value between 0 and 2 PI (TAU)
|
||||
/*
|
||||
static float rrand() {
|
||||
return frand() * TAU;
|
||||
}
|
||||
*/
|
||||
|
||||
// http://mathworld.wolfram.com/SpherePointPicking.html
|
||||
static vec2 randPolar() {
|
||||
vec2 result(frand(), frand());
|
||||
|
@ -115,59 +99,56 @@ struct StarVertex {
|
|||
vec4 colorAndSize;
|
||||
};
|
||||
|
||||
// FIXME star colors
|
||||
void Stars::render(RenderArgs* renderArgs, float alpha) {
|
||||
static gpu::BufferPointer vertexBuffer;
|
||||
static gpu::Stream::FormatPointer streamFormat;
|
||||
static gpu::Element positionElement, colorElement;
|
||||
static gpu::PipelinePointer _gridPipeline;
|
||||
static gpu::PipelinePointer _starsPipeline;
|
||||
static int32_t _timeSlot{ -1 };
|
||||
static std::once_flag once;
|
||||
static const int STARS_VERTICES_SLOT{ 0 };
|
||||
static const int STARS_COLOR_SLOT{ 1 };
|
||||
|
||||
const int VERTICES_SLOT = 0;
|
||||
const int COLOR_SLOT = 1;
|
||||
gpu::PipelinePointer Stars::_gridPipeline{};
|
||||
gpu::PipelinePointer Stars::_starsPipeline{};
|
||||
int32_t Stars::_timeSlot{ -1 };
|
||||
|
||||
std::call_once(once, [&] {
|
||||
{
|
||||
auto vs = gpu::Shader::createVertex(std::string(standardTransformPNTC_vert));
|
||||
auto ps = gpu::Shader::createPixel(std::string(starsGrid_frag));
|
||||
auto program = gpu::Shader::createProgram(vs, ps);
|
||||
gpu::Shader::makeProgram((*program));
|
||||
_timeSlot = program->getBuffers().findLocation(UNIFORM_TIME_NAME);
|
||||
if (_timeSlot == gpu::Shader::INVALID_LOCATION) {
|
||||
_timeSlot = program->getUniforms().findLocation(UNIFORM_TIME_NAME);
|
||||
}
|
||||
auto state = gpu::StatePointer(new gpu::State());
|
||||
// enable decal blend
|
||||
state->setDepthTest(gpu::State::DepthTest(false));
|
||||
state->setStencilTest(true, 0xFF, gpu::State::StencilTest(0, 0xFF, gpu::EQUAL, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP));
|
||||
state->setBlendFunction(true, gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA);
|
||||
_gridPipeline = gpu::Pipeline::create(program, state);
|
||||
}
|
||||
{
|
||||
auto vs = gpu::Shader::createVertex(std::string(stars_vert));
|
||||
auto ps = gpu::Shader::createPixel(std::string(stars_frag));
|
||||
auto program = gpu::Shader::createProgram(vs, ps);
|
||||
gpu::Shader::makeProgram((*program));
|
||||
auto state = gpu::StatePointer(new gpu::State());
|
||||
// enable decal blend
|
||||
state->setDepthTest(gpu::State::DepthTest(false));
|
||||
state->setStencilTest(true, 0xFF, gpu::State::StencilTest(0, 0xFF, gpu::EQUAL, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP));
|
||||
state->setAntialiasedLineEnable(true); // line smoothing also smooth points
|
||||
state->setBlendFunction(true, gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA);
|
||||
_starsPipeline = gpu::Pipeline::create(program, state);
|
||||
|
||||
void Stars::init() {
|
||||
if (!_gridPipeline) {
|
||||
auto vs = gpu::Shader::createVertex(std::string(standardTransformPNTC_vert));
|
||||
auto ps = gpu::Shader::createPixel(std::string(starsGrid_frag));
|
||||
auto program = gpu::Shader::createProgram(vs, ps);
|
||||
gpu::Shader::makeProgram((*program));
|
||||
_timeSlot = program->getBuffers().findLocation(UNIFORM_TIME_NAME);
|
||||
if (_timeSlot == gpu::Shader::INVALID_LOCATION) {
|
||||
_timeSlot = program->getUniforms().findLocation(UNIFORM_TIME_NAME);
|
||||
}
|
||||
auto state = gpu::StatePointer(new gpu::State());
|
||||
// enable decal blend
|
||||
state->setDepthTest(gpu::State::DepthTest(false));
|
||||
state->setStencilTest(true, 0xFF, gpu::State::StencilTest(0, 0xFF, gpu::EQUAL, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP));
|
||||
state->setBlendFunction(true, gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA);
|
||||
_gridPipeline = gpu::Pipeline::create(program, state);
|
||||
}
|
||||
|
||||
if (!_starsPipeline) {
|
||||
auto vs = gpu::Shader::createVertex(std::string(stars_vert));
|
||||
auto ps = gpu::Shader::createPixel(std::string(stars_frag));
|
||||
auto program = gpu::Shader::createProgram(vs, ps);
|
||||
gpu::Shader::makeProgram((*program));
|
||||
auto state = gpu::StatePointer(new gpu::State());
|
||||
// enable decal blend
|
||||
state->setDepthTest(gpu::State::DepthTest(false));
|
||||
state->setStencilTest(true, 0xFF, gpu::State::StencilTest(0, 0xFF, gpu::EQUAL, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP));
|
||||
state->setAntialiasedLineEnable(true); // line smoothing also smooth points
|
||||
state->setBlendFunction(true, gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA);
|
||||
_starsPipeline = gpu::Pipeline::create(program, state);
|
||||
}
|
||||
|
||||
unsigned limit = STARFIELD_NUM_STARS;
|
||||
std::vector<StarVertex> points;
|
||||
points.resize(limit);
|
||||
|
||||
{ // generate stars
|
||||
QElapsedTimer startTime;
|
||||
startTime.start();
|
||||
|
||||
vertexBuffer.reset(new gpu::Buffer);
|
||||
|
||||
srand(STARFIELD_SEED);
|
||||
unsigned limit = STARFIELD_NUM_STARS;
|
||||
std::vector<StarVertex> points;
|
||||
points.resize(limit);
|
||||
for (size_t star = 0; star < limit; ++star) {
|
||||
points[star].position = vec4(fromPolar(randPolar()), 1);
|
||||
float size = frand() * 2.5f + 0.5f;
|
||||
|
@ -179,16 +160,32 @@ void Stars::render(RenderArgs* renderArgs, float alpha) {
|
|||
points[star].colorAndSize = vec4(color, size);
|
||||
}
|
||||
}
|
||||
|
||||
double timeDiff = (double)startTime.nsecsElapsed() / 1000000.0; // ns to ms
|
||||
qDebug() << "Total time to generate stars: " << timeDiff << " msec";
|
||||
}
|
||||
|
||||
gpu::Element positionElement, colorElement;
|
||||
const size_t VERTEX_STRIDE = sizeof(StarVertex);
|
||||
|
||||
vertexBuffer->append(VERTEX_STRIDE * limit, (const gpu::Byte*)&points[0]);
|
||||
streamFormat.reset(new gpu::Stream::Format()); // 1 for everyone
|
||||
streamFormat->setAttribute(gpu::Stream::POSITION, STARS_VERTICES_SLOT, gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::XYZW), 0);
|
||||
streamFormat->setAttribute(gpu::Stream::COLOR, STARS_COLOR_SLOT, gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::RGBA));
|
||||
positionElement = streamFormat->getAttributes().at(gpu::Stream::POSITION)._element;
|
||||
colorElement = streamFormat->getAttributes().at(gpu::Stream::COLOR)._element;
|
||||
|
||||
size_t offset = offsetof(StarVertex, position);
|
||||
positionView = gpu::BufferView(vertexBuffer, offset, vertexBuffer->getSize(), VERTEX_STRIDE, positionElement);
|
||||
|
||||
offset = offsetof(StarVertex, colorAndSize);
|
||||
colorView = gpu::BufferView(vertexBuffer, offset, vertexBuffer->getSize(), VERTEX_STRIDE, colorElement);
|
||||
}
|
||||
|
||||
// FIXME star colors
|
||||
void Stars::render(RenderArgs* renderArgs, float alpha) {
|
||||
std::call_once(once, [&]{ init(); });
|
||||
|
||||
vertexBuffer->append(sizeof(StarVertex) * limit, (const gpu::Byte*)&points[0]);
|
||||
streamFormat.reset(new gpu::Stream::Format()); // 1 for everyone
|
||||
streamFormat->setAttribute(gpu::Stream::POSITION, VERTICES_SLOT, gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::XYZW), 0);
|
||||
streamFormat->setAttribute(gpu::Stream::COLOR, COLOR_SLOT, gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::RGBA));
|
||||
positionElement = streamFormat->getAttributes().at(gpu::Stream::POSITION)._element;
|
||||
colorElement = streamFormat->getAttributes().at(gpu::Stream::COLOR)._element;
|
||||
});
|
||||
|
||||
auto modelCache = DependencyManager::get<ModelCache>();
|
||||
auto textureCache = DependencyManager::get<TextureCache>();
|
||||
|
@ -210,17 +207,10 @@ void Stars::render(RenderArgs* renderArgs, float alpha) {
|
|||
batch._glUniform1f(_timeSlot, secs);
|
||||
geometryCache->renderCube(batch);
|
||||
|
||||
static const size_t VERTEX_STRIDE = sizeof(StarVertex);
|
||||
size_t offset = offsetof(StarVertex, position);
|
||||
gpu::BufferView posView(vertexBuffer, offset, vertexBuffer->getSize(), VERTEX_STRIDE, positionElement);
|
||||
offset = offsetof(StarVertex, colorAndSize);
|
||||
gpu::BufferView colView(vertexBuffer, offset, vertexBuffer->getSize(), VERTEX_STRIDE, colorElement);
|
||||
|
||||
// Render the stars
|
||||
batch.setPipeline(_starsPipeline);
|
||||
|
||||
batch.setInputFormat(streamFormat);
|
||||
batch.setInputBuffer(VERTICES_SLOT, posView);
|
||||
batch.setInputBuffer(COLOR_SLOT, colView);
|
||||
batch.setInputBuffer(STARS_VERTICES_SLOT, positionView);
|
||||
batch.setInputBuffer(STARS_COLOR_SLOT, colorView);
|
||||
batch.draw(gpu::Primitive::POINTS, STARFIELD_NUM_STARS);
|
||||
}
|
||||
|
|
|
@ -12,21 +12,37 @@
|
|||
#ifndef hifi_Stars_h
|
||||
#define hifi_Stars_h
|
||||
|
||||
#include <gpu/Context.h>
|
||||
|
||||
class RenderArgs;
|
||||
|
||||
// Starfield rendering component.
|
||||
class Stars {
|
||||
public:
|
||||
Stars();
|
||||
~Stars();
|
||||
Stars() = default;
|
||||
~Stars() = default;
|
||||
|
||||
Stars(Stars const&) = delete;
|
||||
Stars& operator=(Stars const&) = delete;
|
||||
|
||||
// Renders the starfield from a local viewer's perspective.
|
||||
// The parameters specify the field of view.
|
||||
void render(RenderArgs* args, float alpha);
|
||||
|
||||
private:
|
||||
// don't copy/assign
|
||||
Stars(Stars const&); // = delete;
|
||||
Stars& operator=(Stars const&); // delete;
|
||||
// Pipelines
|
||||
static gpu::PipelinePointer _gridPipeline;
|
||||
static gpu::PipelinePointer _starsPipeline;
|
||||
static int32_t _timeSlot;
|
||||
|
||||
// Buffers
|
||||
gpu::BufferPointer vertexBuffer;
|
||||
gpu::Stream::FormatPointer streamFormat;
|
||||
gpu::BufferView positionView;
|
||||
gpu::BufferView colorView;
|
||||
std::once_flag once;
|
||||
|
||||
void init();
|
||||
};
|
||||
|
||||
|
||||
|
|
|
@ -77,7 +77,6 @@ AvatarManager::AvatarManager(QObject* parent) :
|
|||
}
|
||||
|
||||
AvatarManager::~AvatarManager() {
|
||||
_myAvatar->die();
|
||||
}
|
||||
|
||||
void AvatarManager::init() {
|
||||
|
@ -250,6 +249,17 @@ void AvatarManager::clearOtherAvatars() {
|
|||
_myAvatar->clearLookAtTargetAvatar();
|
||||
}
|
||||
|
||||
void AvatarManager::clearAllAvatars() {
|
||||
clearOtherAvatars();
|
||||
|
||||
QWriteLocker locker(&_hashLock);
|
||||
|
||||
_myAvatar->die();
|
||||
_myAvatar.reset();
|
||||
|
||||
_avatarHash.clear();
|
||||
}
|
||||
|
||||
void AvatarManager::setLocalLights(const QVector<AvatarManager::LocalLight>& localLights) {
|
||||
if (QThread::currentThread() != thread()) {
|
||||
QMetaObject::invokeMethod(this, "setLocalLights", Q_ARG(const QVector<AvatarManager::LocalLight>&, localLights));
|
||||
|
|
|
@ -45,6 +45,7 @@ public:
|
|||
void updateOtherAvatars(float deltaTime);
|
||||
|
||||
void clearOtherAvatars();
|
||||
void clearAllAvatars();
|
||||
|
||||
bool shouldShowReceiveStats() const { return _shouldShowReceiveStats; }
|
||||
|
||||
|
|
|
@ -204,6 +204,15 @@ MyAvatar::~MyAvatar() {
|
|||
_lookAtTargetAvatar.reset();
|
||||
}
|
||||
|
||||
void MyAvatar::setOrientationVar(const QVariant& newOrientationVar) {
|
||||
Avatar::setOrientation(quatFromVariant(newOrientationVar));
|
||||
}
|
||||
|
||||
QVariant MyAvatar::getOrientationVar() const {
|
||||
return quatToVariant(Avatar::getOrientation());
|
||||
}
|
||||
|
||||
|
||||
// virtual
|
||||
void MyAvatar::simulateAttachments(float deltaTime) {
|
||||
// don't update attachments here, do it in harvestResultsFromPhysicsSimulation()
|
||||
|
|
|
@ -105,6 +105,10 @@ public:
|
|||
// thread safe
|
||||
Q_INVOKABLE glm::mat4 getSensorToWorldMatrix() const;
|
||||
|
||||
Q_INVOKABLE void setOrientationVar(const QVariant& newOrientationVar);
|
||||
Q_INVOKABLE QVariant getOrientationVar() const;
|
||||
|
||||
|
||||
// Pass a recent sample of the HMD to the avatar.
|
||||
// This can also update the avatar's position to follow the HMD
|
||||
// as it moves through the world.
|
||||
|
|
|
@ -80,6 +80,11 @@ glm::vec2 ControllerScriptingInterface::getViewportDimensions() const {
|
|||
return qApp->getUiSize();
|
||||
}
|
||||
|
||||
QVariant ControllerScriptingInterface::getRecommendedOverlayRect() const {
|
||||
auto rect = qApp->getRecommendedOverlayRect();
|
||||
return qRectToVariant(rect);
|
||||
}
|
||||
|
||||
controller::InputController* ControllerScriptingInterface::createInputController(const QString& deviceName, const QString& tracker) {
|
||||
// This is where we retrieve the Device Tracker category and then the sub tracker within it
|
||||
auto icIt = _inputControllers.find(0);
|
||||
|
|
|
@ -96,6 +96,7 @@ public slots:
|
|||
virtual void releaseJoystick(int joystickIndex);
|
||||
|
||||
virtual glm::vec2 getViewportDimensions() const;
|
||||
virtual QVariant getRecommendedOverlayRect() const;
|
||||
|
||||
/// Factory to create an InputController
|
||||
virtual controller::InputController* createInputController(const QString& deviceName, const QString& tracker);
|
||||
|
|
|
@ -34,6 +34,7 @@ static const float reticleSize = TWO_PI / 100.0f;
|
|||
static QString _tooltipId;
|
||||
|
||||
const uvec2 CompositorHelper::VIRTUAL_SCREEN_SIZE = uvec2(3960, 1188); // ~10% more pixel density than old version, 72dx240d FOV
|
||||
const QRect CompositorHelper::VIRTUAL_SCREEN_RECOMMENDED_OVERLAY_RECT = QRect(956, 0, 2048, 1188); // don't include entire width only center 2048
|
||||
const float CompositorHelper::VIRTUAL_UI_ASPECT_RATIO = (float)VIRTUAL_SCREEN_SIZE.x / (float)VIRTUAL_SCREEN_SIZE.y;
|
||||
const vec2 CompositorHelper::VIRTUAL_UI_TARGET_FOV = vec2(PI * 3.0f / 2.0f, PI * 3.0f / 2.0f / VIRTUAL_UI_ASPECT_RATIO);
|
||||
const vec2 CompositorHelper::MOUSE_EXTENTS_ANGULAR_SIZE = vec2(PI * 2.0f, PI * 0.95f); // horizontal: full sphere, vertical: ~5deg from poles
|
||||
|
|
|
@ -42,6 +42,7 @@ class CompositorHelper : public QObject, public Dependency {
|
|||
Q_PROPERTY(bool reticleOverDesktop READ getReticleOverDesktop WRITE setReticleOverDesktop)
|
||||
public:
|
||||
static const uvec2 VIRTUAL_SCREEN_SIZE;
|
||||
static const QRect VIRTUAL_SCREEN_RECOMMENDED_OVERLAY_RECT;
|
||||
static const float VIRTUAL_UI_ASPECT_RATIO;
|
||||
static const vec2 VIRTUAL_UI_TARGET_FOV;
|
||||
static const vec2 MOUSE_EXTENTS_ANGULAR_SIZE;
|
||||
|
|
|
@ -34,6 +34,11 @@ glm::uvec2 HmdDisplayPlugin::getRecommendedUiSize() const {
|
|||
return CompositorHelper::VIRTUAL_SCREEN_SIZE;
|
||||
}
|
||||
|
||||
QRect HmdDisplayPlugin::getRecommendedOverlayRect() const {
|
||||
return CompositorHelper::VIRTUAL_SCREEN_RECOMMENDED_OVERLAY_RECT;
|
||||
}
|
||||
|
||||
|
||||
bool HmdDisplayPlugin::internalActivate() {
|
||||
_monoPreview = _container->getBoolSetting("monoPreview", DEFAULT_MONO_VIEW);
|
||||
|
||||
|
|
|
@ -26,6 +26,8 @@ public:
|
|||
void setEyeRenderPose(uint32_t frameIndex, Eye eye, const glm::mat4& pose) override final;
|
||||
bool isDisplayVisible() const override { return isHmdMounted(); }
|
||||
|
||||
QRect getRecommendedOverlayRect() const override final;
|
||||
|
||||
virtual glm::mat4 getHeadPose() const override;
|
||||
|
||||
|
||||
|
|
|
@ -926,7 +926,8 @@ void EntityItem::simulateKinematicMotion(float timeElapsed, bool setFlags) {
|
|||
glm::quat dQ = computeBulletRotationStep(localAngularVelocity, dt);
|
||||
rotation = glm::normalize(dQ * rotation);
|
||||
|
||||
setRotation(rotation);
|
||||
bool success;
|
||||
setOrientation(rotation, success, false);
|
||||
}
|
||||
|
||||
setLocalAngularVelocity(localAngularVelocity);
|
||||
|
@ -1983,10 +1984,10 @@ void EntityItem::locationChanged(bool tellPhysics) {
|
|||
requiresRecalcBoxes();
|
||||
if (tellPhysics) {
|
||||
_dirtyFlags |= Simulation::DIRTY_TRANSFORM;
|
||||
}
|
||||
EntityTreePointer tree = getTree();
|
||||
if (tree) {
|
||||
tree->entityChanged(getThisPointer());
|
||||
EntityTreePointer tree = getTree();
|
||||
if (tree) {
|
||||
tree->entityChanged(getThisPointer());
|
||||
}
|
||||
}
|
||||
SpatiallyNestable::locationChanged(tellPhysics); // tell all the children, also
|
||||
}
|
||||
|
|
|
@ -35,6 +35,7 @@ void EntitySimulation::updateEntities() {
|
|||
callUpdateOnEntitiesThatNeedIt(now);
|
||||
moveSimpleKinematics(now);
|
||||
updateEntitiesInternal(now);
|
||||
PerformanceTimer perfTimer("sortingEntities");
|
||||
sortEntitiesThatMoved();
|
||||
}
|
||||
|
||||
|
@ -133,10 +134,8 @@ void EntitySimulation::callUpdateOnEntitiesThatNeedIt(const quint64& now) {
|
|||
|
||||
// protected
|
||||
void EntitySimulation::sortEntitiesThatMoved() {
|
||||
QMutexLocker lock(&_mutex);
|
||||
// NOTE: this is only for entities that have been moved by THIS EntitySimulation.
|
||||
// External changes to entity position/shape are expected to be sorted outside of the EntitySimulation.
|
||||
PerformanceTimer perfTimer("sortingEntities");
|
||||
MovingEntitiesOperator moveOperator(_entityTree);
|
||||
AACube domainBounds(glm::vec3((float)-HALF_TREE_SCALE), (float)TREE_SCALE);
|
||||
SetOfEntities::iterator itemItr = _entitiesToSort.begin();
|
||||
|
|
|
@ -92,7 +92,7 @@ protected:
|
|||
|
||||
void expireMortalEntities(const quint64& now);
|
||||
void callUpdateOnEntitiesThatNeedIt(const quint64& now);
|
||||
void sortEntitiesThatMoved();
|
||||
virtual void sortEntitiesThatMoved();
|
||||
|
||||
QMutex _mutex{ QMutex::Recursive };
|
||||
|
||||
|
|
|
@ -132,3 +132,12 @@ void SimpleEntitySimulation::clearEntitiesInternal() {
|
|||
_entitiesThatNeedSimulationOwner.clear();
|
||||
}
|
||||
|
||||
void SimpleEntitySimulation::sortEntitiesThatMoved() {
|
||||
SetOfEntities::iterator itemItr = _entitiesToSort.begin();
|
||||
while (itemItr != _entitiesToSort.end()) {
|
||||
EntityItemPointer entity = *itemItr;
|
||||
entity->computePuffedQueryAACube();
|
||||
++itemItr;
|
||||
}
|
||||
EntitySimulation::sortEntitiesThatMoved();
|
||||
}
|
||||
|
|
|
@ -30,6 +30,8 @@ protected:
|
|||
virtual void changeEntityInternal(EntityItemPointer entity) override;
|
||||
virtual void clearEntitiesInternal() override;
|
||||
|
||||
virtual void sortEntitiesThatMoved() override;
|
||||
|
||||
SetOfEntities _entitiesWithSimulationOwner;
|
||||
SetOfEntities _entitiesThatNeedSimulationOwner;
|
||||
quint64 _nextOwnerlessExpiry { 0 };
|
||||
|
|
|
@ -126,7 +126,7 @@ ShapeWrapperPtr loadPlane(ProgramPtr program, float aspect) {
|
|||
}
|
||||
|
||||
ShapeWrapperPtr loadSkybox(ProgramPtr program) {
|
||||
return ShapeWrapperPtr(new shapes::ShapeWrapper({ { "Position" } }, shapes::SkyBox(), *program));
|
||||
return ShapeWrapperPtr(new shapes::ShapeWrapper(std::initializer_list<std::string>{ "Position" }, shapes::SkyBox(), *program));
|
||||
}
|
||||
|
||||
// Return a point's cartesian coordinates on a sphere from pitch and yaw
|
||||
|
|
|
@ -139,7 +139,7 @@ public:
|
|||
GLuint _virtualSize; // theoretical size as expected
|
||||
GLuint _numLevels{ 0 };
|
||||
|
||||
void transferMip(GLenum target, const Texture::PixelsPointer& mip) const;
|
||||
void transferMip(uint16_t mipLevel, uint8_t face = 0) const;
|
||||
|
||||
// The owning texture
|
||||
const Texture& _gpuTexture;
|
||||
|
|
|
@ -93,25 +93,22 @@ void GLBackend::GLTexture::createTexture() {
|
|||
(void)CHECK_GL_ERROR();
|
||||
// Fixme: this usage of TexStorage doesn't work with compressed textures, although it should.
|
||||
// GO through the process of allocating the correct storage
|
||||
/* if (GLEW_VERSION_4_2 && !texture.getTexelFormat().isCompressed()) {
|
||||
glTexStorage2D(_target, _numLevels, texelFormat.internalFormat, width, height);
|
||||
(void)CHECK_GL_ERROR();
|
||||
} else*/
|
||||
{
|
||||
if (GLEW_VERSION_4_2 && !_gpuTexture.getTexelFormat().isCompressed()) {
|
||||
glTexStorage2D(_target, _numLevels, texelFormat.internalFormat, width, height);
|
||||
(void)CHECK_GL_ERROR();
|
||||
} else {
|
||||
glTexParameteri(_target, GL_TEXTURE_BASE_LEVEL, 0);
|
||||
glTexParameteri(_target, GL_TEXTURE_MAX_LEVEL, _numLevels - 1);
|
||||
|
||||
// for (int l = 0; l < _numLevels; l++) {
|
||||
{ int l = 0;
|
||||
if (_gpuTexture.getType() == gpu::Texture::TEX_CUBE) {
|
||||
for (size_t face = 0; face < CUBE_NUM_FACES; face++) {
|
||||
glTexImage2D(CUBE_FACE_LAYOUT[face], l, texelFormat.internalFormat, width, height, 0, texelFormat.format, texelFormat.type, NULL);
|
||||
for (uint16_t l = 0; l < _numLevels; l++) {
|
||||
if (_gpuTexture.getType() == gpu::Texture::TEX_CUBE) {
|
||||
for (size_t face = 0; face < CUBE_NUM_FACES; face++) {
|
||||
glTexImage2D(CUBE_FACE_LAYOUT[face], l, texelFormat.internalFormat, width, height, 0, texelFormat.format, texelFormat.type, NULL);
|
||||
}
|
||||
} else {
|
||||
glTexImage2D(_target, l, texelFormat.internalFormat, width, height, 0, texelFormat.format, texelFormat.type, NULL);
|
||||
}
|
||||
} else {
|
||||
glTexImage2D(_target, l, texelFormat.internalFormat, width, height, 0, texelFormat.format, texelFormat.type, NULL);
|
||||
}
|
||||
width = std::max(1, (width / 2));
|
||||
height = std::max(1, (height / 2));
|
||||
width = std::max(1, (width / 2));
|
||||
height = std::max(1, (height / 2));
|
||||
}
|
||||
(void)CHECK_GL_ERROR();
|
||||
}
|
||||
|
@ -213,9 +210,13 @@ bool GLBackend::GLTexture::isReady() const {
|
|||
}
|
||||
|
||||
// Move content bits from the CPU to the GPU for a given mip / face
|
||||
void GLBackend::GLTexture::transferMip(GLenum target, const Texture::PixelsPointer& mip) const {
|
||||
void GLBackend::GLTexture::transferMip(uint16_t mipLevel, uint8_t face) const {
|
||||
auto mip = _gpuTexture.accessStoredMipFace(mipLevel, face);
|
||||
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuTexture.getTexelFormat(), mip->getFormat());
|
||||
glTexSubImage2D(target, 0, 0, 0, _gpuTexture.getWidth(), _gpuTexture.getHeight(), texelFormat.format, texelFormat.type, mip->readData());
|
||||
GLenum target = _target == GL_TEXTURE_2D ? GL_TEXTURE_2D : CUBE_FACE_LAYOUT[face];
|
||||
uvec2 size = uvec2(_gpuTexture.getWidth(), _gpuTexture.getHeight());
|
||||
size >>= mipLevel;
|
||||
glTexSubImage2D(target, mipLevel, 0, 0, size.x, size.y, texelFormat.format, texelFormat.type, mip->readData());
|
||||
(void)CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
|
@ -234,16 +235,20 @@ void GLBackend::GLTexture::transfer() const {
|
|||
// GO through the process of allocating the correct storage and/or update the content
|
||||
switch (_gpuTexture.getType()) {
|
||||
case Texture::TEX_2D:
|
||||
if (_gpuTexture.isStoredMipFaceAvailable(0)) {
|
||||
transferMip(GL_TEXTURE_2D, _gpuTexture.accessStoredMipFace(0));
|
||||
for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
|
||||
if (_gpuTexture.isStoredMipFaceAvailable(i)) {
|
||||
transferMip(i);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case Texture::TEX_CUBE:
|
||||
// transfer pixels from each faces
|
||||
for (uint8_t f = 0; f < CUBE_NUM_FACES; f++) {
|
||||
if (_gpuTexture.isStoredMipFaceAvailable(0, f)) {
|
||||
transferMip(CUBE_FACE_LAYOUT[f], _gpuTexture.accessStoredMipFace(0, f));
|
||||
for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
|
||||
if (_gpuTexture.isStoredMipFaceAvailable(i, f)) {
|
||||
transferMip(i, f);
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
@ -269,12 +274,21 @@ void GLBackend::GLTexture::postTransfer() {
|
|||
// At this point the mip pixels have been loaded, we can notify the gpu texture to abandon it's memory
|
||||
switch (_gpuTexture.getType()) {
|
||||
case Texture::TEX_2D:
|
||||
_gpuTexture.notifyMipFaceGPULoaded(0, 0);
|
||||
for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
|
||||
if (_gpuTexture.isStoredMipFaceAvailable(i)) {
|
||||
_gpuTexture.notifyMipFaceGPULoaded(i);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case Texture::TEX_CUBE:
|
||||
for (uint8_t f = 0; f < CUBE_NUM_FACES; ++f) {
|
||||
_gpuTexture.notifyMipFaceGPULoaded(0, f);
|
||||
// transfer pixels from each faces
|
||||
for (uint8_t f = 0; f < CUBE_NUM_FACES; f++) {
|
||||
for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
|
||||
if (_gpuTexture.isStoredMipFaceAvailable(i, f)) {
|
||||
_gpuTexture.notifyMipFaceGPULoaded(i, f);
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
|
@@ -345,7 +359,7 @@ GLuint GLBackend::getTextureID(const TexturePointer& texture, bool sync) {
} else {
object = Backend::getGPUObject<GLBackend::GLTexture>(*texture);
}
if (object) {
if (object && object->getSyncState() == GLTexture::Idle) {
return object->_texture;
} else {
return 0;

@@ -428,7 +428,7 @@ public:
Stamp getSamplerStamp() const { return _samplerStamp; }

// Only callable by the Backend
void notifyMipFaceGPULoaded(uint16 level, uint8 face) const { return _storage->notifyMipFaceGPULoaded(level, face); }
void notifyMipFaceGPULoaded(uint16 level, uint8 face = 0) const { return _storage->notifyMipFaceGPULoaded(level, face); }

const GPUObjectPointer gpuObject {};
@@ -154,21 +154,63 @@ NetworkTexturePointer TextureCache::getTexture(const QUrl& url, TextureType type
return ResourceCache::getResource(url, QUrl(), content.isEmpty(), &extra).staticCast<NetworkTexture>();
}

/// Returns a texture version of an image file
gpu::TexturePointer TextureCache::getImageTexture(const QString& path) {
QImage image = QImage(path).mirrored(false, true);
gpu::Element formatGPU = gpu::Element(gpu::VEC3, gpu::NUINT8, gpu::RGB);
gpu::Element formatMip = gpu::Element(gpu::VEC3, gpu::NUINT8, gpu::RGB);
if (image.hasAlphaChannel()) {
formatGPU = gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA);
formatMip = gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::BGRA);

TextureCache::TextureLoaderFunc getTextureLoaderForType(TextureType type) {
switch (type) {
case ALBEDO_TEXTURE: {
return model::TextureUsage::createAlbedoTextureFromImage;
break;
}
case EMISSIVE_TEXTURE: {
return model::TextureUsage::createEmissiveTextureFromImage;
break;
}
case LIGHTMAP_TEXTURE: {
return model::TextureUsage::createLightmapTextureFromImage;
break;
}
case CUBE_TEXTURE: {
return model::TextureUsage::createCubeTextureFromImage;
break;
}
case BUMP_TEXTURE: {
return model::TextureUsage::createNormalTextureFromBumpImage;
break;
}
case NORMAL_TEXTURE: {
return model::TextureUsage::createNormalTextureFromNormalImage;
break;
}
case ROUGHNESS_TEXTURE: {
return model::TextureUsage::createRoughnessTextureFromImage;
break;
}
case GLOSS_TEXTURE: {
return model::TextureUsage::createRoughnessTextureFromGlossImage;
break;
}
case SPECULAR_TEXTURE: {
return model::TextureUsage::createMetallicTextureFromImage;
break;
}
case CUSTOM_TEXTURE: {
Q_ASSERT(false);
return TextureCache::TextureLoaderFunc();
break;
}
case DEFAULT_TEXTURE:
default: {
return model::TextureUsage::create2DTextureFromImage;
break;
}
}
gpu::TexturePointer texture = gpu::TexturePointer(
gpu::Texture::create2D(formatGPU, image.width(), image.height(),
gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
texture->assignStoredMip(0, formatMip, image.byteCount(), image.constBits());
texture->autoGenerateMips(-1);
return texture;
}

/// Returns a texture version of an image file
gpu::TexturePointer TextureCache::getImageTexture(const QString& path, TextureType type) {
QImage image = QImage(path);
auto loader = getTextureLoaderForType(type);
return gpu::TexturePointer(loader(image, QUrl::fromLocalFile(path).fileName().toStdString()));
}

QSharedPointer<Resource> TextureCache::createResource(const QUrl& url,
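For reference, a minimal sketch of how the new loader table might be consumed. This is hypothetical calling code, not part of the change; it assumes the TextureType values and loader signature shown in the hunk above.

    // Hypothetical caller: pick the loader for a type and build a texture from a QImage.
    QImage image("albedo.png");                              // any QImage source
    auto loader = getTextureLoaderForType(ALBEDO_TEXTURE);   // maps to createAlbedoTextureFromImage
    gpu::TexturePointer texture(loader(image, "albedo.png")); // second argument is a debug name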
@@ -203,53 +245,10 @@ NetworkTexture::NetworkTexture(const QUrl& url, const TextureLoaderFunc& texture
}

NetworkTexture::TextureLoaderFunc NetworkTexture::getTextureLoader() const {
switch (_type) {
case ALBEDO_TEXTURE: {
return TextureLoaderFunc(model::TextureUsage::createAlbedoTextureFromImage);
break;
}
case EMISSIVE_TEXTURE: {
return TextureLoaderFunc(model::TextureUsage::createEmissiveTextureFromImage);
break;
}
case LIGHTMAP_TEXTURE: {
return TextureLoaderFunc(model::TextureUsage::createLightmapTextureFromImage);
break;
}
case CUBE_TEXTURE: {
return TextureLoaderFunc(model::TextureUsage::createCubeTextureFromImage);
break;
}
case BUMP_TEXTURE: {
return TextureLoaderFunc(model::TextureUsage::createNormalTextureFromBumpImage);
break;
}
case NORMAL_TEXTURE: {
return TextureLoaderFunc(model::TextureUsage::createNormalTextureFromNormalImage);
break;
}
case ROUGHNESS_TEXTURE: {
return TextureLoaderFunc(model::TextureUsage::createRoughnessTextureFromImage);
break;
}
case GLOSS_TEXTURE: {
return TextureLoaderFunc(model::TextureUsage::createRoughnessTextureFromGlossImage);
break;
}
case SPECULAR_TEXTURE: {
return TextureLoaderFunc(model::TextureUsage::createMetallicTextureFromImage);
break;
}
case CUSTOM_TEXTURE: {
return _textureLoader;
break;
}
case DEFAULT_TEXTURE:
default: {
return TextureLoaderFunc(model::TextureUsage::create2DTextureFromImage);
break;
}
if (_type == CUSTOM_TEXTURE) {
return _textureLoader;
}
return getTextureLoaderForType(_type);
}

@@ -72,7 +72,7 @@ public:
const gpu::TexturePointer& getNormalFittingTexture();

/// Returns a texture version of an image file
static gpu::TexturePointer getImageTexture(const QString& path);
static gpu::TexturePointer getImageTexture(const QString& path, TextureType type = DEFAULT_TEXTURE);

/// Loads a texture from the specified URL.
NetworkTexturePointer getTexture(const QUrl& url, TextureType type = DEFAULT_TEXTURE,
@@ -150,8 +150,8 @@ gpu::Texture* TextureUsage::process2DTextureColorFromImage(const QImage& srcImag
QImage image = process2DImageColor(srcImage, validAlpha, alphaAsMask);

gpu::Texture* theTexture = nullptr;
if ((image.width() > 0) && (image.height() > 0)) {

if ((image.width() > 0) && (image.height() > 0)) {
gpu::Element formatGPU;
gpu::Element formatMip;
defineColorTexelFormats(formatGPU, formatMip, image, isLinear, doCompress);

@@ -171,6 +171,14 @@ gpu::Texture* TextureUsage::process2DTextureColorFromImage(const QImage& srcImag

if (generateMips) {
theTexture->autoGenerateMips(-1);
auto levels = theTexture->maxMip();
uvec2 size(image.width(), image.height());
for (uint8_t i = 1; i <= levels; ++i) {
size >>= 1;
size = glm::max(size, uvec2(1));
image = image.scaled(size.x, size.y, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
theTexture->assignStoredMip(i, formatMip, image.byteCount(), image.constBits());
}
}
}
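The generateMips branch above halves the image per level with QImage::scaled and stores each result as that level's pixels. A hedged, standalone sketch of the same CPU-side mip walk (hypothetical helper, Qt only, not part of the diff):

    // Hypothetical: build a CPU-side mip chain with QImage, mirroring the loop above.
    #include <QImage>
    #include <QtGlobal>
    #include <vector>
    std::vector<QImage> buildMipChain(QImage image, int levels) {
        std::vector<QImage> mips;
        int w = image.width(), h = image.height();
        for (int i = 1; i <= levels; ++i) {
            w = qMax(w / 2, 1);                  // halve, never below 1x1
            h = qMax(h / 2, 1);
            image = image.scaled(w, h, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
            mips.push_back(image);               // level i pixels, ready for assignStoredMip()
        }
        return mips;
    }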
@@ -291,7 +299,6 @@ gpu::Texture* TextureUsage::createNormalTextureFromBumpImage(const QImage& srcIm
gpu::Element formatGPU = gpu::Element(gpu::VEC3, gpu::NUINT8, gpu::RGB);
gpu::Element formatMip = gpu::Element(gpu::VEC3, gpu::NUINT8, gpu::RGB);

theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
theTexture->assignStoredMip(0, formatMip, image.byteCount(), image.constBits());
theTexture->autoGenerateMips(-1);

@@ -11,6 +11,8 @@

#include "AssetUtils.h"

#include <memory>

#include <QtCore/QCryptographicHash>
#include <QtNetwork/QAbstractNetworkCache>
@@ -29,12 +31,15 @@ QByteArray hashData(const QByteArray& data) {

QByteArray loadFromCache(const QUrl& url) {
if (auto cache = NetworkAccessManager::getInstance().cache()) {
if (auto ioDevice = cache->data(url)) {

// the caller is responsible for deleting the ioDevice, hence the unique_ptr
if (auto ioDevice = std::unique_ptr<QIODevice>(cache->data(url))) {
qCDebug(asset_client) << url.toDisplayString() << "loaded from disk cache.";
return ioDevice->readAll();
} else {
qCDebug(asset_client) << url.toDisplayString() << "not in disk cache";
}

} else {
qCWarning(asset_client) << "No disk cache to load assets from.";
}
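A stripped-down illustration of the ownership pattern adopted above: QAbstractNetworkCache::data() returns a raw QIODevice* that the caller must delete, so wrapping it immediately in std::unique_ptr keeps every return path leak-free. The function below is a hypothetical standalone sketch, not code from the change.

    #include <memory>
    #include <QByteArray>
    #include <QIODevice>
    #include <QUrl>
    #include <QtNetwork/QAbstractNetworkCache>
    QByteArray readCached(QAbstractNetworkCache* cache, const QUrl& url) {
        // data() transfers ownership of the device; unique_ptr releases it on every path.
        if (auto ioDevice = std::unique_ptr<QIODevice>(cache->data(url))) {
            return ioDevice->readAll();
        }
        return QByteArray(); // not cached
    }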
@@ -49,7 +54,8 @@ bool saveToCache(const QUrl& url, const QByteArray& file) {
metaData.setSaveToDisk(true);
metaData.setLastModified(QDateTime::currentDateTime());
metaData.setExpirationDate(QDateTime()); // Never expires

// ioDevice is managed by the cache and should either be passed back to insert or remove!
if (auto ioDevice = cache->prepare(metaData)) {
ioDevice->write(file);
cache->insert(ioDevice);
@@ -28,6 +28,97 @@
(((x) > (max)) ? (max) :\
(x)))

void ResourceCacheSharedItems::appendActiveRequest(QWeakPointer<Resource> resource) {
Lock lock(_mutex);
_loadingRequests.append(resource);
}

void ResourceCacheSharedItems::appendPendingRequest(QWeakPointer<Resource> resource) {
Lock lock(_mutex);
_pendingRequests.append(resource);
}

QList<QSharedPointer<Resource>> ResourceCacheSharedItems::getPendingRequests() {
QList<QSharedPointer<Resource>> result;

{
Lock lock(_mutex);
foreach(QSharedPointer<Resource> resource, _pendingRequests) {
if (resource) {
result.append(resource);
}
}
}
return result;
}

uint32_t ResourceCacheSharedItems::getPendingRequestsCount() const {
Lock lock(_mutex);
return _pendingRequests.size();
}

QList<QSharedPointer<Resource>> ResourceCacheSharedItems::getLoadingRequests() {
QList<QSharedPointer<Resource>> result;

{
Lock lock(_mutex);
foreach(QSharedPointer<Resource> resource, _loadingRequests) {
if (resource) {
result.append(resource);
}
}
}
return result;
}

void ResourceCacheSharedItems::removeRequest(QWeakPointer<Resource> resource) {
Lock lock(_mutex);
// QWeakPointer has no operator== for comparing two weak ptrs, so loop manually;
// this also clears out any entries whose resource has already been freed.
for (int i = 0; i < _loadingRequests.size();) {
auto request = _loadingRequests.at(i);
// Clear our resource and any freed resources
if (!request || request.data() == resource.data()) {
_loadingRequests.removeAt(i);
continue;
}
i++;
}
}

QSharedPointer<Resource> ResourceCacheSharedItems::getHighestPendingRequest() {
Lock lock(_mutex);
// look for the highest priority pending request
int highestIndex = -1;
float highestPriority = -FLT_MAX;
QSharedPointer<Resource> highestResource;

for (int i = 0; i < _pendingRequests.size();) {
// Clear any freed resources
auto resource = _pendingRequests.at(i).lock();
if (!resource) {
_pendingRequests.removeAt(i);
continue;
}

// Check load priority
float priority = resource->getLoadPriority();
if (priority >= highestPriority) {
highestPriority = priority;
highestIndex = i;
highestResource = resource;
}
i++;
}

if (highestIndex >= 0) {
_pendingRequests.takeAt(highestIndex);
}

return highestResource;
}

ResourceCache::ResourceCache(QObject* parent) : QObject(parent) {
auto& domainHandler = DependencyManager::get<NodeList>()->getDomainHandler();
connect(&domainHandler, &DomainHandler::disconnectedFromDomain,
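getHighestPendingRequest() above prunes dead weak pointers and pops the winning entry inside a single critical section. A condensed, hypothetical sketch of that pattern outside the class (std::mutex and a plain vector stand in for the class's Lock and QList; names are assumptions):

    #include <cfloat>
    #include <memory>
    #include <mutex>
    #include <vector>
    // Hypothetical: pick and remove the highest-priority live entry under one lock.
    template <typename T>
    std::shared_ptr<T> takeHighest(std::vector<std::weak_ptr<T>>& pending, std::mutex& mutex,
                                   float (*priorityOf)(const T&)) {
        std::lock_guard<std::mutex> lock(mutex);
        int best = -1;
        float bestPriority = -FLT_MAX;
        std::shared_ptr<T> winner;
        for (size_t i = 0; i < pending.size();) {
            auto item = pending[i].lock();
            if (!item) { pending.erase(pending.begin() + i); continue; } // drop freed entries
            float p = priorityOf(*item);
            if (p >= bestPriority) { bestPriority = p; best = int(i); winner = item; }
            ++i;
        }
        if (best >= 0) { pending.erase(pending.begin() + best); } // pop the winner
        return winner;
    }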
@@ -264,81 +355,7 @@ void ResourceCache::updateTotalSize(const qint64& oldSize, const qint64& newSize
_totalResourcesSize += (newSize - oldSize);
emit dirty();
}

void ResourceCacheSharedItems::appendActiveRequest(QWeakPointer<Resource> resource) {
Lock lock(_mutex);
_loadingRequests.append(resource);
}

void ResourceCacheSharedItems::appendPendingRequest(QWeakPointer<Resource> resource) {
Lock lock(_mutex);
_pendingRequests.append(resource);
}

QList<QSharedPointer<Resource>> ResourceCacheSharedItems::getPendingRequests() {
QList<QSharedPointer<Resource>> result;

{
Lock lock(_mutex);
foreach(QSharedPointer<Resource> resource, _pendingRequests) {
if (resource) {
result.append(resource);
}
}
}
return result;
}

uint32_t ResourceCacheSharedItems::getPendingRequestsCount() const {
Lock lock(_mutex);
return _pendingRequests.size();
}

QList<QSharedPointer<Resource>> ResourceCacheSharedItems::getLoadingRequests() {
QList<QSharedPointer<Resource>> result;

{
Lock lock(_mutex);
foreach(QSharedPointer<Resource> resource, _loadingRequests) {
if (resource) {
result.append(resource);
}
}
}
return result;
}

void ResourceCacheSharedItems::removeRequest(QWeakPointer<Resource> resource) {
Lock lock(_mutex);
_loadingRequests.removeAll(resource);
}

QSharedPointer<Resource> ResourceCacheSharedItems::getHighestPendingRequest() {
Lock lock(_mutex);
// look for the highest priority pending request
int highestIndex = -1;
float highestPriority = -FLT_MAX;
QSharedPointer<Resource> highestResource;
for (int i = 0; i < _pendingRequests.size();) {
auto resource = _pendingRequests.at(i).lock();
if (!resource) {
_pendingRequests.removeAt(i);
continue;
}
float priority = resource->getLoadPriority();
if (priority >= highestPriority) {
highestPriority = priority;
highestIndex = i;
highestResource = resource;
}
i++;
}
if (highestIndex >= 0) {
_pendingRequests.takeAt(highestIndex);
}
return highestResource;
}

QList<QSharedPointer<Resource>> ResourceCache::getLoadingRequests() {
return DependencyManager::get<ResourceCacheSharedItems>()->getLoadingRequests();
}
|
@ -105,6 +105,12 @@ public:
|
|||
return aspect(getRecommendedRenderSize());
|
||||
}
|
||||
|
||||
// The recommended bounds for primary overlay placement
|
||||
virtual QRect getRecommendedOverlayRect() const {
|
||||
auto recommendedSize = getRecommendedUiSize();
|
||||
return QRect(0, 0, recommendedSize.x, recommendedSize.y);
|
||||
}
|
||||
|
||||
// Stereo specific methods
|
||||
virtual glm::mat4 getEyeProjection(Eye eye, const glm::mat4& baseProjection) const {
|
||||
return baseProjection;
|
||||
|
|
|
@ -22,12 +22,7 @@
|
|||
#define __STR1__(x) __STR2__(x)
|
||||
#define __LOC__ __FILE__ "(" __STR1__(__LINE__) ") : Warning Msg: "
|
||||
|
||||
#ifndef __APPLE__
|
||||
static const QString DESKTOP_LOCATION = QStandardPaths::writableLocation(QStandardPaths::DesktopLocation);
|
||||
#else
|
||||
// Temporary fix to Qt bug: http://stackoverflow.com/questions/16194475
|
||||
static const QString DESKTOP_LOCATION = QStandardPaths::writableLocation(QStandardPaths::DesktopLocation).append("/script.js");
|
||||
#endif
|
||||
|
||||
ScriptsModel& getScriptsModel() {
|
||||
static ScriptsModel scriptsModel;
|
||||
|
|
|
@@ -128,7 +128,7 @@ void vec3FromScriptValue(const QScriptValue &object, glm::vec3 &vec3) {
vec3.z = object.property("z").toVariant().toFloat();
}

QVariant vec3toVariant(const glm::vec3 &vec3) {
QVariant vec3toVariant(const glm::vec3& vec3) {
if (vec3.x != vec3.x || vec3.y != vec3.y || vec3.z != vec3.z) {
// if vec3 contains a NaN don't try to convert it
return QVariant();

@@ -140,6 +140,18 @@ QVariant vec3toVariant(const glm::vec3 &vec3) {
return result;
}

QVariant vec4toVariant(const glm::vec4& vec4) {
if (isNaN(vec4.x) || isNaN(vec4.y) || isNaN(vec4.z) || isNaN(vec4.w)) {
// if vec4 contains a NaN don't try to convert it
return QVariant();
}
QVariantMap result;
result["x"] = vec4.x;
result["y"] = vec4.y;
result["z"] = vec4.z;
result["w"] = vec4.w;
return result;
}

QScriptValue qVectorVec3ToScriptValue(QScriptEngine* engine, const QVector<glm::vec3>& vector) {
QScriptValue array = engine->newArray();

@@ -150,7 +162,7 @@ QScriptValue qVectorVec3ToScriptValue(QScriptEngine* engine, const QVector<glm::
}

glm::vec3 vec3FromVariant(const QVariant &object, bool& valid) {
glm::vec3 vec3FromVariant(const QVariant& object, bool& valid) {
glm::vec3 v;
valid = false;
if (!object.isValid() || object.isNull()) {
@@ -189,12 +201,49 @@ glm::vec3 vec3FromVariant(const QVariant &object, bool& valid) {
return v;
}

glm::vec3 vec3FromVariant(const QVariant &object) {
glm::vec3 vec3FromVariant(const QVariant& object) {
bool valid = false;
return vec3FromVariant(object, valid);
}

QScriptValue quatToScriptValue(QScriptEngine* engine, const glm::quat &quat) {
glm::vec4 vec4FromVariant(const QVariant& object, bool& valid) {
glm::vec4 v;
valid = false;
if (!object.isValid() || object.isNull()) {
return v;
} else if (object.canConvert<float>()) {
v = glm::vec4(object.toFloat());
valid = true;
} else if (object.canConvert<QVector4D>()) {
auto qvec4 = qvariant_cast<QVector4D>(object);
v.x = qvec4.x();
v.y = qvec4.y();
v.z = qvec4.z();
v.w = qvec4.w();
valid = true;
} else {
auto map = object.toMap();
auto x = map["x"];
auto y = map["y"];
auto z = map["z"];
auto w = map["w"];
if (x.canConvert<float>() && y.canConvert<float>() && z.canConvert<float>() && w.canConvert<float>()) {
v.x = x.toFloat();
v.y = y.toFloat();
v.z = z.toFloat();
v.w = w.toFloat();
valid = true;
}
}
return v;
}

glm::vec4 vec4FromVariant(const QVariant& object) {
bool valid = false;
return vec4FromVariant(object, valid);
}

QScriptValue quatToScriptValue(QScriptEngine* engine, const glm::quat& quat) {
QScriptValue obj = engine->newObject();
if (quat.x != quat.x || quat.y != quat.y || quat.z != quat.z || quat.w != quat.w) {
// if quat contains a NaN don't try to convert it
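A small usage sketch for the new vec4 conversions added above (hypothetical snippet; it assumes the vec4toVariant / vec4FromVariant declarations shown in the header hunk further down):

    // Hypothetical round trip: vec4 -> QVariant map -> vec4.
    glm::vec4 original(1.0f, 2.0f, 3.0f, 4.0f);
    QVariant packed = vec4toVariant(original);            // map with "x", "y", "z", "w"
    bool valid = false;
    glm::vec4 restored = vec4FromVariant(packed, valid);  // valid == true, restored == original
    // A NaN component would instead yield an invalid (empty) QVariant from vec4toVariant.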
@@ -207,7 +256,7 @@ QScriptValue quatToScriptValue(QScriptEngine* engine, const glm::quat &quat) {
return obj;
}

void quatFromScriptValue(const QScriptValue &object, glm::quat &quat) {
void quatFromScriptValue(const QScriptValue& object, glm::quat &quat) {
quat.x = object.property("x").toVariant().toFloat();
quat.y = object.property("y").toVariant().toFloat();
quat.z = object.property("z").toVariant().toFloat();

@@ -245,12 +294,12 @@ glm::quat quatFromVariant(const QVariant &object, bool& isValid) {
return q;
}

glm::quat quatFromVariant(const QVariant &object) {
glm::quat quatFromVariant(const QVariant& object) {
bool valid = false;
return quatFromVariant(object, valid);
}

QVariant quatToVariant(const glm::quat &quat) {
QVariant quatToVariant(const glm::quat& quat) {
if (quat.x != quat.x || quat.y != quat.y || quat.z != quat.z) {
// if the quat contains a NaN don't try to convert it
return QVariant();
|
@ -43,12 +43,15 @@ void mat4FromScriptValue(const QScriptValue& object, glm::mat4& mat4);
|
|||
// Vec4
|
||||
QScriptValue vec4toScriptValue(QScriptEngine* engine, const glm::vec4& vec4);
|
||||
void vec4FromScriptValue(const QScriptValue& object, glm::vec4& vec4);
|
||||
QVariant vec4toVariant(const glm::vec4& vec4);
|
||||
glm::vec4 vec4FromVariant(const QVariant &object, bool& valid);
|
||||
glm::vec4 vec4FromVariant(const QVariant &object);
|
||||
|
||||
// Vec3
|
||||
QScriptValue vec3toScriptValue(QScriptEngine* engine, const glm::vec3 &vec3);
|
||||
void vec3FromScriptValue(const QScriptValue &object, glm::vec3 &vec3);
|
||||
|
||||
QVariant vec3toVariant(const glm::vec3 &vec3);
|
||||
QVariant vec3toVariant(const glm::vec3& vec3);
|
||||
glm::vec3 vec3FromVariant(const QVariant &object, bool& valid);
|
||||
glm::vec3 vec3FromVariant(const QVariant &object);
|
||||
|
||||
|
@ -71,9 +74,10 @@ glm::quat quatFromVariant(const QVariant &object);
|
|||
// Rect
|
||||
QScriptValue qRectToScriptValue(QScriptEngine* engine, const QRect& rect);
|
||||
void qRectFromScriptValue(const QScriptValue& object, QRect& rect);
|
||||
|
||||
QVariant qRectToVariant(const QRect& rect);
|
||||
QRect qRectFromVariant(const QVariant& object, bool& isValid);
|
||||
QRect qRectFromVariant(const QVariant& object);
|
||||
QVariant qRectToVariant(const QRect& rect);
|
||||
|
||||
|
||||
// xColor
|
||||
QScriptValue xColorToScriptValue(QScriptEngine* engine, const xColor& color);
|
||||
|
|
|
@ -620,7 +620,7 @@ glm::vec3 SpatiallyNestable::getLocalPosition() const {
|
|||
return result;
|
||||
}
|
||||
|
||||
void SpatiallyNestable::setLocalPosition(const glm::vec3& position) {
|
||||
void SpatiallyNestable::setLocalPosition(const glm::vec3& position, bool tellPhysics) {
|
||||
// guard against introducing NaN into the transform
|
||||
if (isNaN(position)) {
|
||||
qDebug() << "SpatiallyNestable::setLocalPosition -- position contains NaN";
|
||||
|
@@ -629,7 +629,7 @@ void SpatiallyNestable::setLocalPosition(const glm::vec3& position) {
_transformLock.withWriteLock([&] {
_transform.setTranslation(position);
});
locationChanged();
locationChanged(tellPhysics);
}

glm::quat SpatiallyNestable::getLocalOrientation() const {
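The added tellPhysics flag is simply forwarded to locationChanged(); a hedged usage sketch of the resulting API (hypothetical caller, assuming a SpatiallyNestable-derived object):

    // Hypothetical: move an object without notifying the physics simulation,
    // e.g. when the new position already came from the physics engine.
    object->setLocalPosition(newPosition, false); // tellPhysics = false skips the physics notification
    object->setLocalPosition(newPosition);        // default tellPhysics = true behaves as before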
@@ -102,7 +102,7 @@ public:
virtual void setLocalTransform(const Transform& transform);

virtual glm::vec3 getLocalPosition() const;
virtual void setLocalPosition(const glm::vec3& position);
virtual void setLocalPosition(const glm::vec3& position, bool tellPhysics = true);

virtual glm::quat getLocalOrientation() const;
virtual void setLocalOrientation(const glm::quat& orientation);

@@ -38,9 +38,11 @@

#include <GLMHelpers.h>
#include <PathUtils.h>
#include <NumericalConstants.h>

#include <GeometryCache.h>
#include <DeferredLightingEffect.h>
#include <NumericalConstants.h>
#include <TextureCache.h>

#include "unlit_frag.h"
#include "unlit_vert.h"
@@ -83,6 +85,93 @@ public:

uint32_t toCompactColor(const glm::vec4& color);

const char* VERTEX_SHADER = R"SHADER(
#version 450 core

layout(location = 0) in vec4 inPosition;
layout(location = 3) in vec2 inTexCoord0;

struct TransformObject {
mat4 _model;
mat4 _modelInverse;
};

layout(location=15) in ivec2 _drawCallInfo;

uniform samplerBuffer transformObjectBuffer;

TransformObject getTransformObject() {
int offset = 8 * _drawCallInfo.x;
TransformObject object;
object._model[0] = texelFetch(transformObjectBuffer, offset);
object._model[1] = texelFetch(transformObjectBuffer, offset + 1);
object._model[2] = texelFetch(transformObjectBuffer, offset + 2);
object._model[3] = texelFetch(transformObjectBuffer, offset + 3);

object._modelInverse[0] = texelFetch(transformObjectBuffer, offset + 4);
object._modelInverse[1] = texelFetch(transformObjectBuffer, offset + 5);
object._modelInverse[2] = texelFetch(transformObjectBuffer, offset + 6);
object._modelInverse[3] = texelFetch(transformObjectBuffer, offset + 7);

return object;
}

struct TransformCamera {
mat4 _view;
mat4 _viewInverse;
mat4 _projectionViewUntranslated;
mat4 _projection;
mat4 _projectionInverse;
vec4 _viewport;
};

layout(std140) uniform transformCameraBuffer {
TransformCamera _camera;
};

TransformCamera getTransformCamera() {
return _camera;
}

// the interpolated texture coordinate
out vec2 _texCoord0;

void main(void) {
_texCoord0 = inTexCoord0.st;

// standard transform
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
{ // transformModelToClipPos
vec4 eyeWAPos;
{ // _transformModelToEyeWorldAlignedPos
highp mat4 _mv = obj._model;
_mv[3].xyz -= cam._viewInverse[3].xyz;
highp vec4 _eyeWApos = (_mv * inPosition);
eyeWAPos = _eyeWApos;
}
gl_Position = cam._projectionViewUntranslated * eyeWAPos;
}

})SHADER";

const char* FRAGMENT_SHADER = R"SHADER(
#version 450 core

uniform sampler2D originalTexture;

in vec2 _texCoord0;

layout(location = 0) out vec4 _fragColor0;

void main(void) {
//_fragColor0 = vec4(_texCoord0, 0.0, 1.0);
_fragColor0 = texture(originalTexture, _texCoord0);
}
)SHADER";

gpu::ShaderPointer makeShader(const std::string & vertexShaderSrc, const std::string & fragmentShaderSrc, const gpu::Shader::BindingSet & bindings) {
auto vs = gpu::Shader::createVertex(vertexShaderSrc);
auto fs = gpu::Shader::createPixel(fragmentShaderSrc);
@@ -125,6 +214,7 @@ class QTestWindow : public QWindow {
glm::mat4 _projectionMatrix;
RateCounter fps;
QTime _time;
glm::mat4 _camera;

protected:
void renderText();

@@ -145,7 +235,7 @@ public:
setGLFormatVersion(format);
format.setProfile(QSurfaceFormat::OpenGLContextProfile::CoreProfile);
format.setOption(QSurfaceFormat::DebugContext);
format.setSwapInterval(0);
//format.setSwapInterval(0);

setFormat(format);

@@ -158,19 +248,22 @@ public:

gpu::Context::init<gpu::GLBackend>();
_context = std::make_shared<gpu::Context>();

makeCurrent();
auto shader = makeShader(unlit_vert, unlit_frag, gpu::Shader::BindingSet{});
auto state = std::make_shared<gpu::State>();
state->setMultisampleEnable(true);
state->setDepthTest(gpu::State::DepthTest { true });
_pipeline = gpu::Pipeline::create(shader, state);

// Clear screen
gpu::Batch batch;
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLORS, { 1.0, 0.0, 0.5, 1.0 });
_context->render(batch);

DependencyManager::set<GeometryCache>();
DependencyManager::set<TextureCache>();
DependencyManager::set<DeferredLightingEffect>();

resize(QSize(800, 600));
@@ -181,182 +274,227 @@ public:
virtual ~QTestWindow() {
}

void updateCamera() {
float t = _time.elapsed() * 1e-4f;
glm::vec3 unitscale { 1.0f };
glm::vec3 up { 0.0f, 1.0f, 0.0f };

float distance = 3.0f;
glm::vec3 camera_position { distance * sinf(t), 0.5f, distance * cosf(t) };

static const vec3 camera_focus(0);
static const vec3 camera_up(0, 1, 0);
_camera = glm::inverse(glm::lookAt(camera_position, camera_focus, up));
}

void drawFloorGrid(gpu::Batch& batch) {
auto geometryCache = DependencyManager::get<GeometryCache>();
// Render grid on xz plane (not the optimal way to do things, but w/e)
// Note: GeometryCache::renderGrid will *not* work, as it is apparently unaffected by batch rotations and renders xy only
static const std::string GRID_INSTANCE = "Grid";
static auto compactColor1 = toCompactColor(vec4 { 0.35f, 0.25f, 0.15f, 1.0f });
static auto compactColor2 = toCompactColor(vec4 { 0.15f, 0.25f, 0.35f, 1.0f });
static std::vector<glm::mat4> transforms;
static gpu::BufferPointer colorBuffer;
if (!transforms.empty()) {
transforms.reserve(200);
colorBuffer = std::make_shared<gpu::Buffer>();
for (int i = 0; i < 100; ++i) {
{
glm::mat4 transform = glm::translate(mat4(), vec3(0, -1, -50 + i));
transform = glm::scale(transform, vec3(100, 1, 1));
transforms.push_back(transform);
colorBuffer->append(compactColor1);
}

{
glm::mat4 transform = glm::mat4_cast(quat(vec3(0, PI / 2.0f, 0)));
transform = glm::translate(transform, vec3(0, -1, -50 + i));
transform = glm::scale(transform, vec3(100, 1, 1));
transforms.push_back(transform);
colorBuffer->append(compactColor2);
}
}
}
auto pipeline = geometryCache->getSimplePipeline();
for (auto& transform : transforms) {
batch.setModelTransform(transform);
batch.setupNamedCalls(GRID_INSTANCE, [=](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {
batch.setViewTransform(_camera);
batch.setPipeline(_pipeline);
geometryCache->renderWireShapeInstances(batch, GeometryCache::Line, data.count(), colorBuffer);
});
}
}

void drawSimpleShapes(gpu::Batch& batch) {
auto geometryCache = DependencyManager::get<GeometryCache>();
static const size_t ITEM_COUNT = 1000;
static const float SHAPE_INTERVAL = (PI * 2.0f) / ITEM_COUNT;
static const float ITEM_INTERVAL = SHAPE_INTERVAL / TYPE_COUNT;

static const gpu::Element POSITION_ELEMENT { gpu::VEC3, gpu::FLOAT, gpu::XYZ };
static const gpu::Element NORMAL_ELEMENT { gpu::VEC3, gpu::FLOAT, gpu::XYZ };
static const gpu::Element COLOR_ELEMENT { gpu::VEC4, gpu::NUINT8, gpu::RGBA };

static std::vector<Transform> transforms;
static std::vector<vec4> colors;
static gpu::BufferPointer colorBuffer;
static gpu::BufferView colorView;
static gpu::BufferView instanceXfmView;
if (!colorBuffer) {
colorBuffer = std::make_shared<gpu::Buffer>();

static const float ITEM_RADIUS = 20;
static const vec3 ITEM_TRANSLATION { 0, 0, -ITEM_RADIUS };
for (size_t i = 0; i < TYPE_COUNT; ++i) {
GeometryCache::Shape shape = SHAPE[i];
GeometryCache::ShapeData shapeData = geometryCache->_shapes[shape];
//indirectCommand._count
float startingInterval = ITEM_INTERVAL * i;
for (size_t j = 0; j < ITEM_COUNT; ++j) {
float theta = j * SHAPE_INTERVAL + startingInterval;
auto transform = glm::rotate(mat4(), theta, Vectors::UP);
transform = glm::rotate(transform, (randFloat() - 0.5f) * PI / 4.0f, Vectors::UNIT_X);
transform = glm::translate(transform, ITEM_TRANSLATION);
transform = glm::scale(transform, vec3(randFloat() / 2.0f + 0.5f));
transforms.push_back(transform);
auto color = vec4 { randomColorValue(64), randomColorValue(64), randomColorValue(64), 255 };
color /= 255.0f;
colors.push_back(color);
colorBuffer->append(toCompactColor(color));
}
}
colorView = gpu::BufferView(colorBuffer, COLOR_ELEMENT);
}

batch.setViewTransform(_camera);
batch.setPipeline(_pipeline);
batch.setInputFormat(getInstancedSolidStreamFormat());
for (size_t i = 0; i < TYPE_COUNT; ++i) {
GeometryCache::Shape shape = SHAPE[i];
GeometryCache::ShapeData shapeData = geometryCache->_shapes[shape];
batch.setInputBuffer(gpu::Stream::COLOR, colorView);
for (size_t j = 0; j < ITEM_COUNT; ++j) {
batch.setModelTransform(transforms[j]);
shapeData.draw(batch);
}
}
}

void drawCenterShape(gpu::Batch& batch) {
// Render unlit cube + sphere
static auto startUsecs = usecTimestampNow();
float seconds = getSeconds(startUsecs);
seconds /= 4.0f;
batch.setModelTransform(Transform());
batch._glColor4f(0.8f, 0.25f, 0.25f, 1.0f);

bool wire = (seconds - floorf(seconds) > 0.5f);
auto geometryCache = DependencyManager::get<GeometryCache>();
int shapeIndex = ((int)seconds) % TYPE_COUNT;
if (wire) {
geometryCache->renderWireShape(batch, SHAPE[shapeIndex]);
} else {
geometryCache->renderShape(batch, SHAPE[shapeIndex]);
}

batch.setModelTransform(Transform().setScale(2.05f));
batch._glColor4f(1, 1, 1, 1);
geometryCache->renderWireCube(batch);
}

void drawTerrain(gpu::Batch& batch) {
auto geometryCache = DependencyManager::get<GeometryCache>();
static std::once_flag once;
static gpu::BufferPointer vertexBuffer { std::make_shared<gpu::Buffer>() };
static gpu::BufferPointer indexBuffer { std::make_shared<gpu::Buffer>() };

static gpu::BufferView positionView;
static gpu::BufferView textureView;
static gpu::Stream::FormatPointer vertexFormat { std::make_shared<gpu::Stream::Format>() };

static gpu::TexturePointer texture;
static gpu::PipelinePointer pipeline;
std::call_once(once, [&] {
static const uint SHAPE_VERTEX_STRIDE = sizeof(glm::vec4) * 2; // position, normals, textures
static const uint SHAPE_TEXTURES_OFFSET = sizeof(glm::vec4);
static const gpu::Element POSITION_ELEMENT { gpu::VEC3, gpu::FLOAT, gpu::XYZ };
static const gpu::Element TEXTURE_ELEMENT { gpu::VEC2, gpu::FLOAT, gpu::UV };
std::vector<vec4> vertices;
const int MINX = -1000;
const int MAXX = 1000;

// top
vertices.push_back(vec4(MAXX, 0, MAXX, 1));
vertices.push_back(vec4(MAXX, MAXX, 0, 0));

vertices.push_back(vec4(MAXX, 0, MINX, 1));
vertices.push_back(vec4(MAXX, 0, 0, 0));

vertices.push_back(vec4(MINX, 0, MINX, 1));
vertices.push_back(vec4(0, 0, 0, 0));

vertices.push_back(vec4(MINX, 0, MAXX, 1));
vertices.push_back(vec4(0, MAXX, 0, 0));

vertexBuffer->append(vertices);
indexBuffer->append(std::vector<uint16_t>({ 0, 1, 2, 2, 3, 0 }));

positionView = gpu::BufferView(vertexBuffer, 0, vertexBuffer->getSize(), SHAPE_VERTEX_STRIDE, POSITION_ELEMENT);
textureView = gpu::BufferView(vertexBuffer, SHAPE_TEXTURES_OFFSET, vertexBuffer->getSize(), SHAPE_VERTEX_STRIDE, TEXTURE_ELEMENT);
texture = DependencyManager::get<TextureCache>()->getImageTexture("C:/Users/bdavis/Git/openvr/samples/bin/cube_texture.png");
//texture = DependencyManager::get<TextureCache>()->getImageTexture("H:/test.png");
//texture = DependencyManager::get<TextureCache>()->getImageTexture("H:/crate_blue.fbm/lambert8SG_Normal_OpenGL.png");

auto shader = makeShader(VERTEX_SHADER, FRAGMENT_SHADER, gpu::Shader::BindingSet {});
auto state = std::make_shared<gpu::State>();
state->setMultisampleEnable(false);
state->setDepthTest(gpu::State::DepthTest { true });
pipeline = gpu::Pipeline::create(shader, state);
vertexFormat->setAttribute(gpu::Stream::POSITION);
vertexFormat->setAttribute(gpu::Stream::TEXCOORD);
});
batch.setPipeline(pipeline);
batch.setInputBuffer(gpu::Stream::POSITION, positionView);
batch.setInputBuffer(gpu::Stream::TEXCOORD, textureView);
batch.setIndexBuffer(gpu::UINT16, indexBuffer, 0);
batch.setInputFormat(vertexFormat);

batch.setResourceTexture(0, texture);
batch.setModelTransform(glm::translate(glm::mat4(), vec3(0, -0.1, 0)));
batch.drawIndexed(gpu::TRIANGLES, 6, 0);

batch.setResourceTexture(0, DependencyManager::get<TextureCache>()->getBlueTexture());
batch.setModelTransform(glm::translate(glm::mat4(), vec3(0, -0.2, 0)));
batch.drawIndexed(gpu::TRIANGLES, 6, 0);
}

void draw() {
// Attempting to draw before we're visible and have a valid size will
// produce GL errors.
if (!isVisible() || _size.width() <= 0 || _size.height() <= 0) {
return;
}
updateCamera();
makeCurrent();

gpu::Batch batch;
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLORS, { 0.0f, 0.0f, 0.0f, 1.0f });
batch.resetStages();
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLORS, { 0.0f, 0.1f, 0.2f, 1.0f });
batch.clearDepthFramebuffer(1e4);
batch.setViewportTransform({ 0, 0, _size.width() * devicePixelRatio(), _size.height() * devicePixelRatio() });
batch.setProjectionTransform(_projectionMatrix);

float t = _time.elapsed() * 1e-3f;
glm::vec3 unitscale { 1.0f };
glm::vec3 up { 0.0f, 1.0f, 0.0f };

float distance = 3.0f;
glm::vec3 camera_position{ distance * sinf(t), 0.0f, distance * cosf(t) };

static const vec3 camera_focus(0);
static const vec3 camera_up(0, 1, 0);
glm::mat4 camera = glm::inverse(glm::lookAt(camera_position, camera_focus, up));
batch.setViewTransform(camera);
batch.setViewTransform(_camera);
batch.setPipeline(_pipeline);
batch.setModelTransform(Transform());

auto geometryCache = DependencyManager::get<GeometryCache>();

// Render grid on xz plane (not the optimal way to do things, but w/e)
// Note: GeometryCache::renderGrid will *not* work, as it is apparently unaffected by batch rotations and renders xy only
{
static const std::string GRID_INSTANCE = "Grid";
static auto compactColor1 = toCompactColor(vec4{ 0.35f, 0.25f, 0.15f, 1.0f });
static auto compactColor2 = toCompactColor(vec4{ 0.15f, 0.25f, 0.35f, 1.0f });
static std::vector<glm::mat4> transforms;
static gpu::BufferPointer colorBuffer;
if (!transforms.empty()) {
transforms.reserve(200);
colorBuffer = std::make_shared<gpu::Buffer>();
for (int i = 0; i < 100; ++i) {
{
glm::mat4 transform = glm::translate(mat4(), vec3(0, -1, -50 + i));
transform = glm::scale(transform, vec3(100, 1, 1));
transforms.push_back(transform);
colorBuffer->append(compactColor1);
}

{
glm::mat4 transform = glm::mat4_cast(quat(vec3(0, PI / 2.0f, 0)));
transform = glm::translate(transform, vec3(0, -1, -50 + i));
transform = glm::scale(transform, vec3(100, 1, 1));
transforms.push_back(transform);
colorBuffer->append(compactColor2);
}
}
}

auto pipeline = geometryCache->getSimplePipeline();
for (auto& transform : transforms) {
batch.setModelTransform(transform);
batch.setupNamedCalls(GRID_INSTANCE, [=](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {
batch.setViewTransform(camera);
batch.setPipeline(_pipeline);
geometryCache->renderWireShapeInstances(batch, GeometryCache::Line, data.count(), colorBuffer);
});
}
}

{
static const size_t ITEM_COUNT = 1000;
static const float SHAPE_INTERVAL = (PI * 2.0f) / ITEM_COUNT;
static const float ITEM_INTERVAL = SHAPE_INTERVAL / TYPE_COUNT;

static const gpu::Element POSITION_ELEMENT{ gpu::VEC3, gpu::FLOAT, gpu::XYZ };
static const gpu::Element NORMAL_ELEMENT{ gpu::VEC3, gpu::FLOAT, gpu::XYZ };
static const gpu::Element COLOR_ELEMENT{ gpu::VEC4, gpu::NUINT8, gpu::RGBA };
static const gpu::Element TRANSFORM_ELEMENT{ gpu::MAT4, gpu::FLOAT, gpu::XYZW };

static std::vector<Transform> transforms;
static std::vector<vec4> colors;
static gpu::BufferPointer indirectBuffer;
static gpu::BufferPointer transformBuffer;
static gpu::BufferPointer colorBuffer;
static gpu::BufferView colorView;
static gpu::BufferView instanceXfmView;

if (!transformBuffer) {
transformBuffer = std::make_shared<gpu::Buffer>();
colorBuffer = std::make_shared<gpu::Buffer>();
indirectBuffer = std::make_shared<gpu::Buffer>();

static const float ITEM_RADIUS = 20;
static const vec3 ITEM_TRANSLATION{ 0, 0, -ITEM_RADIUS };
for (size_t i = 0; i < TYPE_COUNT; ++i) {
GeometryCache::Shape shape = SHAPE[i];
GeometryCache::ShapeData shapeData = geometryCache->_shapes[shape];
{
gpu::Batch::DrawIndexedIndirectCommand indirectCommand;
indirectCommand._count = (uint)shapeData._indexCount;
indirectCommand._instanceCount = ITEM_COUNT;
indirectCommand._baseInstance = (uint)(i * ITEM_COUNT);
indirectCommand._firstIndex = (uint)shapeData._indexOffset / 2;
indirectCommand._baseVertex = 0;
indirectBuffer->append(indirectCommand);
}

//indirectCommand._count
float startingInterval = ITEM_INTERVAL * i;
for (size_t j = 0; j < ITEM_COUNT; ++j) {
float theta = j * SHAPE_INTERVAL + startingInterval;
auto transform = glm::rotate(mat4(), theta, Vectors::UP);
transform = glm::rotate(transform, (randFloat() - 0.5f) * PI / 4.0f, Vectors::UNIT_X);
transform = glm::translate(transform, ITEM_TRANSLATION);
transform = glm::scale(transform, vec3(randFloat() / 2.0f + 0.5f));
transformBuffer->append(transform);
transforms.push_back(transform);
auto color = vec4{ randomColorValue(64), randomColorValue(64), randomColorValue(64), 255 };
color /= 255.0f;
colors.push_back(color);
colorBuffer->append(toCompactColor(color));
}
}
colorView = gpu::BufferView(colorBuffer, COLOR_ELEMENT);
instanceXfmView = gpu::BufferView(transformBuffer, TRANSFORM_ELEMENT);
}

#if 1
GeometryCache::ShapeData shapeData = geometryCache->_shapes[GeometryCache::Icosahedron];
{
batch.setViewTransform(camera);
batch.setModelTransform(Transform());
batch.setPipeline(_pipeline);
batch.setInputFormat(getInstancedSolidStreamFormat());
batch.setInputBuffer(gpu::Stream::COLOR, colorView);
batch.setIndirectBuffer(indirectBuffer);
shapeData.setupBatch(batch);
batch.multiDrawIndexedIndirect(TYPE_COUNT, gpu::TRIANGLES);
}
#else
batch.setViewTransform(camera);
batch.setPipeline(_pipeline);
for (size_t i = 0; i < TYPE_COUNT; ++i) {
GeometryCache::Shape shape = SHAPE[i];
for (size_t j = 0; j < ITEM_COUNT; ++j) {
int index = i * ITEM_COUNT + j;
batch.setModelTransform(transforms[index]);
const vec4& color = colors[index];
batch._glColor4f(color.r, color.g, color.b, 1.0);
geometryCache->renderShape(batch, shape);
}
}
#endif
}

// Render unlit cube + sphere
static auto startUsecs = usecTimestampNow();
float seconds = getSeconds(startUsecs);

seconds /= 4.0f;
int shapeIndex = ((int)seconds) % TYPE_COUNT;
bool wire = (seconds - floorf(seconds) > 0.5f);
batch.setModelTransform(Transform());
batch._glColor4f(0.8f, 0.25f, 0.25f, 1.0f);

if (wire) {
geometryCache->renderWireShape(batch, SHAPE[shapeIndex]);
} else {
geometryCache->renderShape(batch, SHAPE[shapeIndex]);
}

batch.setModelTransform(Transform().setScale(2.05f));
batch._glColor4f(1, 1, 1, 1);
geometryCache->renderWireCube(batch);
//drawFloorGrid(batch);
//drawSimpleShapes(batch);
drawCenterShape(batch);
drawTerrain(batch);

_context->render(batch);
_qGlContext.swapBuffers(this);
@@ -387,12 +525,12 @@ protected:
int main(int argc, char** argv) {
QGuiApplication app(argc, argv);
QTestWindow window;
QTimer timer;
timer.setInterval(0);
app.connect(&timer, &QTimer::timeout, &app, [&] {
auto timer = new QTimer(&app);
timer->setInterval(0);
app.connect(timer, &QTimer::timeout, &app, [&] {
window.draw();
});
timer.start();
timer->start();
app.exec();
return 0;
}
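The hunk above switches the draw timer from a stack object to a heap QTimer parented to the application, which ties the timer's lifetime to the QGuiApplication and lets Qt delete it automatically. A hedged fragment of a main() using the same pattern (illustrative only, not the file's full source):

    // Hypothetical minimal event-loop driver mirroring the change above.
    #include <QGuiApplication>
    #include <QTimer>
    int main(int argc, char** argv) {
        QGuiApplication app(argc, argv);
        auto timer = new QTimer(&app);          // parented: deleted with the app object
        timer->setInterval(0);                  // fire on every event-loop iteration
        QObject::connect(timer, &QTimer::timeout, &app, [&] { /* per-frame work */ });
        timer->start();
        return app.exec();
    }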