Merge remote-tracking branch 'upstream/master' into homereset

James B. Pollack 2016-04-10 11:23:16 -07:00
commit 02f69607d7
114 changed files with 2706 additions and 1224 deletions

View file

@ -16,7 +16,7 @@ Contributing
git checkout -b new_branch_name
```
4. Code
* Follow the [coding standard](http://docs.highfidelity.io/v1.0/docs/coding-standard)
* Follow the [coding standard](https://readme.highfidelity.com/v1.0/docs/coding-standard)
5. Commit
* Use [well formed commit messages](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html)
6. Update your branch

View file

@ -397,7 +397,7 @@ void Agent::processAgentAvatarAndAudio(float deltaTime) {
if (_numAvatarSoundSentBytes == soundByteArray.size()) {
// we're done with this sound object - so set our pointer back to NULL
// and our sent bytes back to zero
_avatarSound = NULL;
_avatarSound.clear();
_numAvatarSoundSentBytes = 0;
}
}

View file

@ -56,7 +56,7 @@ public:
public slots:
void run();
void playAvatarSound(Sound* avatarSound) { setAvatarSound(avatarSound); }
void playAvatarSound(SharedSoundPointer avatarSound) { setAvatarSound(avatarSound); }
private slots:
void requestScript();
@ -77,7 +77,7 @@ private:
MixedAudioStream _receivedAudioStream;
float _lastReceivedAudioLoudness;
void setAvatarSound(Sound* avatarSound) { _avatarSound = avatarSound; }
void setAvatarSound(SharedSoundPointer avatarSound) { _avatarSound = avatarSound; }
void sendAvatarIdentityPacket();
void sendAvatarBillboardPacket();
@ -85,7 +85,7 @@ private:
QString _scriptContents;
QTimer* _scriptRequestTimeout { nullptr };
bool _isListeningToAudioStream = false;
Sound* _avatarSound = nullptr;
SharedSoundPointer _avatarSound;
int _numAvatarSoundSentBytes = 0;
bool _isAvatar = false;
QTimer* _avatarIdentityTimer = nullptr;

View file

@ -235,6 +235,11 @@ void AssignmentClient::sendAssignmentRequest() {
void AssignmentClient::handleCreateAssignmentPacket(QSharedPointer<ReceivedMessage> message) {
qCDebug(assigmnentclient) << "Received a PacketType::CreateAssignment - attempting to unpack.";
if (_currentAssignment) {
qCWarning(assigmnentclient) << "Received a PacketType::CreateAssignment while still running an active assignment. Ignoring.";
return;
}
// construct the deployed assignment from the packet data
_currentAssignment = AssignmentFactory::unpackAssignment(*message);

View file

@ -1,5 +1,5 @@
{
"version": 1.1,
"version": 1.2,
"settings": [
{
"name": "metaverse",
@ -249,7 +249,7 @@
"label": "X end",
"can_set": true,
"placeholder": "16384.0"
},
},
{
"name": "y_min",
"label": "Y start",

View file

@ -103,4 +103,5 @@
<script src='js/sweetalert.min.js'></script>
<script src='js/settings.js'></script>
<script src='js/form2js.min.js'></script>
<script src='js/sha256.js'></script>
<!--#include virtual="page-end.html"-->

View file

@ -867,6 +867,14 @@ function saveSettings() {
// grab a JSON representation of the form via form2js
var formJSON = form2js('settings-form', ".", false, cleanupFormValues, true);
// check if we've set the basic http password - if so, replace it with its SHA-256 hex digest before submitting
if (formJSON["security"]) {
var password = formJSON["security"]["http_password"];
if (password.length > 0) {
formJSON["security"]["http_password"] = sha256_digest(password);
}
}
console.log(formJSON);
// re-enable all inputs
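As an editorial aside (not part of the commit), the hunk above plus the new sha256.js below mean the settings page never submits the plaintext password; the domain-server stores and compares only the SHA-256 hex digest. A minimal sketch, assuming sha256_digest() from sha256.js is loaded on the page:
```
// Illustrative only: what saveSettings() stores, and how the stored value is
// later checked (the real check happens in C++ via QCryptographicHash::Sha256).
var storedPassword = sha256_digest("examplePassword"); // 64-char lowercase hex

function isAuthenticated(headerPassword) {
    // hash the incoming basic-auth password and compare digests, never plaintext
    return sha256_digest(headerPassword) === storedPassword;
}

console.log(isAuthenticated("examplePassword")); // true
console.log(isAuthenticated("wrong"));           // false
```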

View file

@ -0,0 +1,247 @@
/*
* A JavaScript implementation of the SHA256 hash function.
*
* FILE: sha256.js
* VERSION: 0.8
* AUTHOR: Christoph Bichlmeier <informatik@zombiearena.de>
*
* NOTE: This version is not tested thoroughly!
*
* Copyright (c) 2003, Christoph Bichlmeier
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* ======================================================================
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* SHA256 logical functions */
function rotateRight(n,x) {
return ((x >>> n) | (x << (32 - n)));
}
function choice(x,y,z) {
return ((x & y) ^ (~x & z));
}
function majority(x,y,z) {
return ((x & y) ^ (x & z) ^ (y & z));
}
function sha256_Sigma0(x) {
return (rotateRight(2, x) ^ rotateRight(13, x) ^ rotateRight(22, x));
}
function sha256_Sigma1(x) {
return (rotateRight(6, x) ^ rotateRight(11, x) ^ rotateRight(25, x));
}
function sha256_sigma0(x) {
return (rotateRight(7, x) ^ rotateRight(18, x) ^ (x >>> 3));
}
function sha256_sigma1(x) {
return (rotateRight(17, x) ^ rotateRight(19, x) ^ (x >>> 10));
}
function sha256_expand(W, j) {
return (W[j&0x0f] += sha256_sigma1(W[(j+14)&0x0f]) + W[(j+9)&0x0f] +
sha256_sigma0(W[(j+1)&0x0f]));
}
/* Hash constant words K: */
var K256 = new Array(
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
);
/* global arrays */
var ihash, count, buffer;
var sha256_hex_digits = "0123456789abcdef";
/* Add 32-bit integers with 16-bit operations (bug in some JS-interpreters:
overflow) */
function safe_add(x, y)
{
var lsw = (x & 0xffff) + (y & 0xffff);
var msw = (x >> 16) + (y >> 16) + (lsw >> 16);
return (msw << 16) | (lsw & 0xffff);
}
/* Initialise the SHA256 computation */
function sha256_init() {
ihash = new Array(8);
count = new Array(2);
buffer = new Array(64);
count[0] = count[1] = 0;
ihash[0] = 0x6a09e667;
ihash[1] = 0xbb67ae85;
ihash[2] = 0x3c6ef372;
ihash[3] = 0xa54ff53a;
ihash[4] = 0x510e527f;
ihash[5] = 0x9b05688c;
ihash[6] = 0x1f83d9ab;
ihash[7] = 0x5be0cd19;
}
/* Transform a 512-bit message block */
function sha256_transform() {
var a, b, c, d, e, f, g, h, T1, T2;
var W = new Array(16);
/* Initialize registers with the previous intermediate value */
a = ihash[0];
b = ihash[1];
c = ihash[2];
d = ihash[3];
e = ihash[4];
f = ihash[5];
g = ihash[6];
h = ihash[7];
/* make 32-bit words */
for(var i=0; i<16; i++)
W[i] = ((buffer[(i<<2)+3]) | (buffer[(i<<2)+2] << 8) | (buffer[(i<<2)+1]
<< 16) | (buffer[i<<2] << 24));
for(var j=0; j<64; j++) {
T1 = h + sha256_Sigma1(e) + choice(e, f, g) + K256[j];
if(j < 16) T1 += W[j];
else T1 += sha256_expand(W, j);
T2 = sha256_Sigma0(a) + majority(a, b, c);
h = g;
g = f;
f = e;
e = safe_add(d, T1);
d = c;
c = b;
b = a;
a = safe_add(T1, T2);
}
/* Compute the current intermediate hash value */
ihash[0] += a;
ihash[1] += b;
ihash[2] += c;
ihash[3] += d;
ihash[4] += e;
ihash[5] += f;
ihash[6] += g;
ihash[7] += h;
}
/* Read the next chunk of data and update the SHA256 computation */
function sha256_update(data, inputLen) {
var i, index, curpos = 0;
/* Compute number of bytes mod 64 */
index = ((count[0] >> 3) & 0x3f);
var remainder = (inputLen & 0x3f);
/* Update number of bits */
if ((count[0] += (inputLen << 3)) < (inputLen << 3)) count[1]++;
count[1] += (inputLen >> 29);
/* Transform as many times as possible */
for(i=0; i+63<inputLen; i+=64) {
for(var j=index; j<64; j++)
buffer[j] = data.charCodeAt(curpos++);
sha256_transform();
index = 0;
}
/* Buffer remaining input */
for(var j=0; j<remainder; j++)
buffer[j] = data.charCodeAt(curpos++);
}
/* Finish the computation by operations such as padding */
function sha256_final() {
var index = ((count[0] >> 3) & 0x3f);
buffer[index++] = 0x80;
if(index <= 56) {
for(var i=index; i<56; i++)
buffer[i] = 0;
} else {
for(var i=index; i<64; i++)
buffer[i] = 0;
sha256_transform();
for(var i=0; i<56; i++)
buffer[i] = 0;
}
buffer[56] = (count[1] >>> 24) & 0xff;
buffer[57] = (count[1] >>> 16) & 0xff;
buffer[58] = (count[1] >>> 8) & 0xff;
buffer[59] = count[1] & 0xff;
buffer[60] = (count[0] >>> 24) & 0xff;
buffer[61] = (count[0] >>> 16) & 0xff;
buffer[62] = (count[0] >>> 8) & 0xff;
buffer[63] = count[0] & 0xff;
sha256_transform();
}
/* Split the internal hash values into an array of bytes */
function sha256_encode_bytes() {
var j=0;
var output = new Array(32);
for(var i=0; i<8; i++) {
output[j++] = ((ihash[i] >>> 24) & 0xff);
output[j++] = ((ihash[i] >>> 16) & 0xff);
output[j++] = ((ihash[i] >>> 8) & 0xff);
output[j++] = (ihash[i] & 0xff);
}
return output;
}
/* Get the internal hash as a hex string */
function sha256_encode_hex() {
var output = new String();
for(var i=0; i<8; i++) {
for(var j=28; j>=0; j-=4)
output += sha256_hex_digits.charAt((ihash[i] >>> j) & 0x0f);
}
return output;
}
/* Main function: returns a hex string representing the SHA256 value of the
given data */
function sha256_digest(data) {
sha256_init();
sha256_update(data, data.length);
sha256_final();
return sha256_encode_hex();
}
/* test if the JS-interpreter is working properly */
function sha256_self_test() {
return sha256_digest("message digest") ==
"f7846f55cf23e14eebeab5b4e1550cad5b509e3348fbc4efa3a1413d393cb650";
}
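Editorial note on this file: sha256_digest() is the one-shot entry point used by settings.js above, and safe_add() keeps intermediate sums wrapped to signed 32-bit range (the file's own comment cites overflow bugs in old interpreters). A small usage sketch, assuming the file is loaded:
```
// Sketch only: exercising the public helpers defined above.
console.log(sha256_self_test());              // true when the interpreter behaves
console.log(sha256_digest("message digest")); // f7846f55cf23e14e...

// safe_add() behaves like (x + y) | 0, i.e. it wraps at 32 bits,
// which plain '+' on JavaScript numbers does not:
console.log(safe_add(0x7fffffff, 1)); // -2147483648 (wrapped)
console.log(0x7fffffff + 1);          // 2147483648 (not wrapped)
```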

View file

@ -1679,8 +1679,9 @@ bool DomainServer::isAuthenticatedRequest(HTTPConnection* connection, const QUrl
QString settingsUsername = valueForKeyPath(settingsMap, BASIC_AUTH_USERNAME_KEY_PATH)->toString();
const QVariant* settingsPasswordVariant = valueForKeyPath(settingsMap, BASIC_AUTH_PASSWORD_KEY_PATH);
QString settingsPassword = settingsPasswordVariant ? settingsPasswordVariant->toString() : "";
QString hexHeaderPassword = QCryptographicHash::hash(headerPassword.toUtf8(), QCryptographicHash::Sha256).toHex();
if (settingsUsername == headerUsername && headerPassword == settingsPassword) {
if (settingsUsername == headerUsername && hexHeaderPassword == settingsPassword) {
return true;
}
}

View file

@ -129,7 +129,9 @@ void DomainServerSettingsManager::setupConfigMap(const QStringList& argumentList
// reload the master and user config so that the merged config is right
_configMap.loadMasterAndUserConfig(argumentList);
}
} else if (oldVersion < 1.1) {
}
if (oldVersion < 1.1) {
static const QString ENTITY_SERVER_SETTINGS_KEY = "entity_server_settings";
static const QString ENTITY_FILE_NAME_KEY = "persistFilename";
static const QString ENTITY_FILE_PATH_KEYPATH = ENTITY_SERVER_SETTINGS_KEY + ".persistFilePath";
@ -165,6 +167,28 @@ void DomainServerSettingsManager::setupConfigMap(const QStringList& argumentList
}
}
if (oldVersion < 1.2) {
// This was prior to the SHA-256 hashing of the password for HTTP Basic Authentication.
// If we have a plaintext password in the previous settings file, replace it with its SHA-256 hex digest
static const QString BASIC_AUTH_PASSWORD_KEY_PATH { "security.http_password" };
QVariant* passwordVariant = valueForKeyPath(_configMap.getUserConfig(), BASIC_AUTH_PASSWORD_KEY_PATH);
if (passwordVariant && passwordVariant->canConvert(QMetaType::QString)) {
QString plaintextPassword = passwordVariant->toString();
qDebug() << "Migrating plaintext password to SHA256 hash in domain-server settings.";
*passwordVariant = QCryptographicHash::hash(plaintextPassword.toUtf8(), QCryptographicHash::Sha256).toHex();
// write the new settings to file
persistToFile();
// reload the master and user config so the merged config is correct
_configMap.loadMasterAndUserConfig(argumentList);
}
}
}
// write the current description version to our settings
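The migration above is C++, but the step itself is simple; a rough JavaScript sketch of the same idea (illustrative only, with sha256_digest() borrowed from the bundled sha256.js):
```
// Hypothetical sketch of the 1.1 -> 1.2 migration: replace a stored plaintext
// HTTP password with its SHA-256 hex digest.
function migrateHttpPassword(config, oldVersion) {
    if (oldVersion < 1.2 && config.security && typeof config.security.http_password === "string") {
        config.security.http_password = sha256_digest(config.security.http_password);
    }
    return config;
}

var migrated = migrateHttpPassword({ security: { http_password: "oldPlaintext" } }, 1.1);
console.log(migrated.security.http_password); // 64-char hex digest
```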

View file

@ -12,7 +12,7 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
/*global print, MyAvatar, Entities, AnimationCache, SoundCache, Scene, Camera, Overlays, Audio, HMD, AvatarList, AvatarManager, Controller, UndoStack, Window, Account, GlobalServices, Script, ScriptDiscoveryService, LODManager, Menu, Vec3, Quat, AudioDevice, Paths, Clipboard, Settings, XMLHttpRequest, randFloat, randInt, pointInExtents, vec3equal, setEntityCustomData, getEntityCustomData */
Script.include("../libraries/utils.js");
Script.include("/~/libraries/utils.js");
//
@ -79,6 +79,7 @@ var NEAR_PICK_MAX_DISTANCE = 0.3; // max length of pick-ray for close grabbing t
var PICK_BACKOFF_DISTANCE = 0.2; // helps when hand is intersecting the grabble object
var NEAR_GRABBING_KINEMATIC = true; // force objects to be kinematic when near-grabbed
var SHOW_GRAB_SPHERE = false; // draw a green sphere to show the grab search position and size
var CHECK_TOO_FAR_UNEQUIP_TIME = 1.0; // seconds
//
// equip
@ -290,11 +291,13 @@ function MyController(hand) {
this.intersectionDistance = 0.0;
this.searchSphereDistance = DEFAULT_SEARCH_SPHERE_DISTANCE;
this.ignoreIK = false;
this.offsetPosition = Vec3.ZERO;
this.offsetRotation = Quat.IDENTITY;
this.lastPickTime = 0;
this.lastUnequipCheckTime = 0;
var _this = this;
this.update = function() {
@ -1523,18 +1526,30 @@ function MyController(hand) {
return;
}
if (props.parentID == MyAvatar.sessionUUID &&
Vec3.length(props.localPosition) > NEAR_PICK_MAX_DISTANCE * 2.0) {
// for whatever reason, the held/equipped entity has been pulled away. ungrab or unequip.
print("handControllerGrab -- autoreleasing held or equipped item because it is far from hand." +
props.parentID + " " + vec3toStr(props.position));
this.setState(STATE_RELEASE);
if (this.state == STATE_CONTINUE_NEAR_GRABBING) {
this.callEntityMethodOnGrabbed("releaseGrab");
} else { // (this.state == STATE_CONTINUE_EQUIP || this.state == STATE_CONTINUE_HOLD)
this.callEntityMethodOnGrabbed("releaseEquip");
var now = Date.now();
if (now - this.lastUnequipCheckTime > MSECS_PER_SEC * CHECK_TOO_FAR_UNEQUIP_TIME) {
this.lastUnequipCheckTime = now;
if (props.parentID == MyAvatar.sessionUUID &&
Vec3.length(props.localPosition) > NEAR_PICK_MAX_DISTANCE * 2.0) {
var handPosition = this.getHandPosition();
// the center of the equipped object being far from the hand isn't enough to auto-unequip -- we also
// need to fail the findEntities test.
nearPickedCandidateEntities = Entities.findEntities(handPosition, GRAB_RADIUS);
if (nearPickedCandidateEntities.indexOf(this.grabbedEntity) == -1) {
// for whatever reason, the held/equipped entity has been pulled away. ungrab or unequip.
print("handControllerGrab -- autoreleasing held or equipped item because it is far from hand." +
props.parentID + " " + vec3toStr(props.position));
this.setState(STATE_RELEASE);
if (this.state == STATE_CONTINUE_NEAR_GRABBING) {
this.callEntityMethodOnGrabbed("releaseGrab");
} else { // (this.state == STATE_CONTINUE_EQUIP || this.state == STATE_CONTINUE_HOLD)
this.callEntityMethodOnGrabbed("releaseEquip");
}
return;
}
}
return;
}
// Keep track of the fingertip velocity to impart when we release the object.
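The guard added above layers a cheap time throttle (at most once per CHECK_TOO_FAR_UNEQUIP_TIME seconds) in front of the more expensive distance and Entities.findEntities() checks. A stripped-down sketch of that pattern (function and argument names are hypothetical):
```
// Hypothetical sketch of the throttled auto-release decision above.
var MSECS_PER_SEC = 1000;
var CHECK_TOO_FAR_UNEQUIP_TIME = 1.0; // seconds, as defined earlier in the file
var lastUnequipCheckTime = 0;

function shouldAutoRelease(entityLooksFarFromHand, entityStillNearHand) {
    var now = Date.now();
    if (now - lastUnequipCheckTime <= MSECS_PER_SEC * CHECK_TOO_FAR_UNEQUIP_TIME) {
        return false; // throttled: skip the expensive checks this time around
    }
    lastUnequipCheckTime = now;
    // release only when the entity reads as far away AND the nearby-entity probe fails
    return entityLooksFarFromHand && !entityStillNearHand;
}
```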

View file

@ -11,7 +11,7 @@
Script.load("away.js");
Script.load("progress.js");
Script.load("edit.js");
Script.load("marketplace.js");
Script.load("examples.js");
Script.load("selectAudioDevice.js");
Script.load("notifications.js");
Script.load("users.js");

View file

@ -98,10 +98,21 @@ function seekToLookAt() {
}
function autoHideReticle() {
var now = Date.now();
// sometimes we don't actually get mouse move messages (for example, if the focus has been set
// to an overlay or web page 'overlay'), but the mouse can still be moving, and we don't want
// to autohide in these cases, so we will take this opportunity to also check if the reticle
// position has changed.
if (lastMouseX != Reticle.position.x || lastMouseY != Reticle.position.y) {
lastMouseMoveOrClick = now;
lastMouseX = Reticle.position.x;
lastMouseY = Reticle.position.y;
}
// if we haven't moved in a long period of time, and we're not pointing at some
// system overlay (like a window), then hide the reticle
if (Reticle.visible && !Reticle.pointingAtSystemOverlay) {
var now = Date.now();
var timeSinceLastMouseMove = now - lastMouseMoveOrClick;
if (timeSinceLastMouseMove > HIDE_STATIC_MOUSE_AFTER) {
Reticle.visible = false;

View file

@ -0,0 +1,78 @@
//
// largeHall.js
// examples
//
// Created by Freidrica on 4/1/16.
// Copyright 2016 High Fidelity, Inc.
//
// This entity script invokes reverb upon entering an entity acting as a trigger zone
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
(function() {
var _this = this;
print("EBL PRELOADING NEW VERSION ")
var audioOptions = new AudioEffectOptions({
bandwidth: 7000,
preDelay: 80,
lateDelay: 0,
reverbTime: 3,
earlyDiffusion: 100,
lateDiffusion: 100,
roomSize: 50,
density: 100,
bassMult: 1.5,
bassFreq: 250,
highGain: -12,
highFreq: 3000,
modRate: 2.3,
modDepth: 50,
earlyGain: -12,
lateGain: -12,
earlyMixLeft: 20,
earlyMixRight: 20,
lateMixLeft: 90,
lateMixRight: 90,
wetDryMix: 90,
});
function setter(name) {
return function(value) {
audioOptions[name] = value;
AudioDevice.setReverbOptions(audioOptions);
}
}
function getter(name) {
return function() {
return audioOptions[name];
}
}
function displayer(units) {
return function(value) {
return (value).toFixed(1) + units;
}
}
function scriptEnding() {
AudioDevice.setReverb(false);
print("Reverb is OFF.");
}
_this.enterEntity = function(entityID) {
print('EBL I am insiude');
AudioDevice.setReverbOptions(audioOptions);
AudioDevice.setReverb(true);
print("Reverb is ON.");
};
_this.leaveEntity = function(entityID) {
print('EBL I am outsidee');
AudioDevice.setReverb(false);
print("Reverb is OFF.");
// Messages.sendMessage('PlayBackOnAssignment', 'BowShootingGameWelcome');
};
});
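Since this is an entity script, it only takes effect once attached to a trigger entity. A hedged example of wiring it up from an interface script (entity properties and the script path are illustrative):
```
// Illustrative only: create an invisible, collisionless box as the reverb
// trigger zone and attach largeHall.js to it as an entity script.
var triggerZone = Entities.addEntity({
    type: "Box",
    name: "large-hall-reverb-zone",
    position: MyAvatar.position,
    dimensions: { x: 20, y: 10, z: 20 },
    visible: false,
    collisionless: true,
    script: Script.resolvePath("largeHall.js")
});

Script.scriptEnding.connect(function() {
    Entities.deleteEntity(triggerZone);
});
```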

View file

@ -0,0 +1,78 @@
//
// smallRoom.js
// examples
//
// Created by Freidrica on 4/1/16.
// Copyright 2016 High Fidelity, Inc.
//
// This entity script invokes reverb upon entering an entity acting as a trigger zone
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
(function() {
var _this = this;
print("EBL PRELOADING NEW VERSION ")
var audioOptions = new AudioEffectOptions({
bandwidth: 7000,
preDelay: 20,
lateDelay: 0,
reverbTime: 1.5,
earlyDiffusion: 100,
lateDiffusion: 100,
roomSize: 50,
density: 100,
bassMult: 1.5,
bassFreq: 250,
highGain: -12,
highFreq: 3000,
modRate: 2.3,
modDepth: 50,
earlyGain: -24,
lateGain: -24,
earlyMixLeft: 20,
earlyMixRight: 20,
lateMixLeft: 90,
lateMixRight: 90,
wetDryMix: 70,
});
function setter(name) {
return function(value) {
audioOptions[name] = value;
AudioDevice.setReverbOptions(audioOptions);
}
}
function getter(name) {
return function() {
return audioOptions[name];
}
}
function displayer(units) {
return function(value) {
return (value).toFixed(1) + units;
}
}
function scriptEnding() {
AudioDevice.setReverb(false);
print("Reverb is OFF.");
}
_this.enterEntity = function(entityID) {
print('EBL I am insiude');
// create a slider for each parameter
AudioDevice.setReverbOptions(audioOptions);
AudioDevice.setReverb(true);
print("Reverb is ON.");
};
_this.leaveEntity = function(entityID) {
print('EBL I am outside');
AudioDevice.setReverb(false);
print("Reverb is OFF.");
};
});

View file

@ -1,5 +1,5 @@
//
// marketplace.js
// examples.js
// examples
//
// Created by Eric Levin on 8 Jan 2016
@ -16,9 +16,9 @@ Script.include([
HIFI_PUBLIC_BUCKET = "http://s3.amazonaws.com/hifi-public/";
var toolIconUrl = HIFI_PUBLIC_BUCKET + "images/tools/";
var MARKETPLACE_URL = "https://metaverse.highfidelity.com/marketplace";
var marketplaceWindow = new OverlayWebWindow({
title: 'Marketplace',
var EXAMPLES_URL = "https://metaverse.highfidelity.com/examples";
var examplesWindow = new OverlayWebWindow({
title: 'Examples',
source: "about:blank",
width: 900,
height: 700,
@ -29,43 +29,43 @@ var toolHeight = 50;
var toolWidth = 50;
function showMarketplace(marketplaceID) {
var url = MARKETPLACE_URL;
function showExamples(marketplaceID) {
var url = EXAMPLES_URL;
if (marketplaceID) {
url = url + "/items/" + marketplaceID;
}
print("setting marketplace URL to " + url);
marketplaceWindow.setURL(url);
marketplaceWindow.setVisible(true);
print("setting examples URL to " + url);
examplesWindow.setURL(url);
examplesWindow.setVisible(true);
}
function hideMarketplace() {
marketplaceWindow.setVisible(false);
marketplaceWindow.setURL("about:blank");
function hideExamples() {
examplesWindow.setVisible(false);
examplesWindow.setURL("about:blank");
}
function toggleMarketplace() {
if (marketplaceWindow.visible) {
hideMarketplace();
function toggleExamples() {
if (examplesWindow.visible) {
hideExamples();
} else {
showMarketplace();
showExamples();
}
}
var toolBar = (function() {
var that = {},
toolBar,
browseMarketplaceButton;
browseExamplesButton;
function initialize() {
toolBar = new ToolBar(0, 0, ToolBar.VERTICAL, "highfidelity.marketplace.toolbar", function(windowDimensions, toolbar) {
toolBar = new ToolBar(0, 0, ToolBar.VERTICAL, "highfidelity.examples.toolbar", function(windowDimensions, toolbar) {
return {
x: windowDimensions.x - 8 - toolbar.width,
y: 135
};
});
browseMarketplaceButton = toolBar.addTool({
imageURL: toolIconUrl + "market-01.svg",
browseExamplesButton = toolBar.addTool({
imageURL: toolIconUrl + "examples-01.svg",
subImage: {
x: 0,
y: Tool.IMAGE_WIDTH,
@ -79,10 +79,10 @@ var toolBar = (function() {
showButtonDown: true
});
toolBar.showTool(browseMarketplaceButton, true);
toolBar.showTool(browseExamplesButton, true);
}
var browseMarketplaceButtonDown = false;
var browseExamplesButtonDown = false;
that.mousePressEvent = function(event) {
var clickedOverlay,
url,
@ -98,10 +98,8 @@ var toolBar = (function() {
y: event.y
});
if (browseMarketplaceButton === toolBar.clicked(clickedOverlay)) {
toggleMarketplace();
if (browseExamplesButton === toolBar.clicked(clickedOverlay)) {
toggleExamples();
return true;
}
@ -112,7 +110,7 @@ var toolBar = (function() {
var handled = false;
if (browseMarketplaceButtonDown) {
if (browseExamplesButtonDown) {
var clickedOverlay = Overlays.getOverlayAtPoint({
x: event.x,
y: event.y
@ -120,7 +118,7 @@ var toolBar = (function() {
}
newModelButtonDown = false;
browseMarketplaceButtonDown = false;
browseExamplesButtonDown = false;
return handled;
}

View file

@ -10,43 +10,50 @@
@font-face {
font-family: Raleway-Regular;
src: url(../../resources/fonts/Raleway-Regular.ttf), /* Production */
url(../../interface/resources/fonts/Raleway-Regular.ttf); /* Development */
src: url(../../resources/fonts/Raleway-Regular.ttf), /* Windows production */
url(../../fonts/Raleway-Regular.ttf), /* OSX production */
url(../../interface/resources/fonts/Raleway-Regular.ttf); /* Development, running script in /HiFi/examples */
}
@font-face {
font-family: Raleway-Light;
src: url(../../resources/fonts/Raleway-Light.ttf),
url(../../fonts/Raleway-Light.ttf),
url(../../interface/resources/fonts/Raleway-Light.ttf);
}
@font-face {
font-family: Raleway-Bold;
src: url(../../resources/fonts/Raleway-Bold.ttf),
url(../../fonts/Raleway-Bold.ttf),
url(../../interface/resources/fonts/Raleway-Bold.ttf);
}
@font-face {
font-family: Raleway-SemiBold;
src: url(../../resources/fonts/Raleway-SemiBold.ttf),
url(../../fonts/Raleway-SemiBold.ttf),
url(../../interface/resources/fonts/Raleway-SemiBold.ttf);
}
@font-face {
font-family: FiraSans-SemiBold;
src: url(../../resources/fonts/FiraSans-SemiBold.ttf),
url(../../fonts/FiraSans-SemiBold.ttf),
url(../../interface/resources/fonts/FiraSans-SemiBold.ttf);
}
@font-face {
font-family: AnonymousPro-Regular;
src: url(../../resources/fonts/AnonymousPro-Regular.ttf),
src: url(../../resources/fonts/AnonymousPro-Regular.ttf),
url(../../fonts/AnonymousPro-Regular.ttf),
url(../../interface/resources/fonts/AnonymousPro-Regular.ttf);
}
@font-face {
font-family: HiFi-Glyphs;
src: url(../../resources/fonts/hifi-glyphs.ttf),
src: url(../../resources/fonts/hifi-glyphs.ttf),
url(../../fonts/hifi-glyphs.ttf),
url(../../interface/resources/fonts/hifi-glyphs.ttf);
}
@ -69,8 +76,10 @@ body {
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
}
overflow-x: hidden;
overflow-y: auto;
}
table {
font-family: FiraSans-SemiBold;
@ -231,11 +240,11 @@ input[type="text"] {
input[type="number"] {
position: relative;
height: 28px;
width: 120px;
width: 124px;
}
input[type=number] {
padding-right: 6px;
padding-right: 3px;
}
input[type=number]::-webkit-inner-spin-button {
-webkit-appearance: none;
@ -243,6 +252,7 @@ input[type=number]::-webkit-inner-spin-button {
display: block;
position: relative;
width: 10px;
height: 100%;
overflow: hidden;
font-family: hifi-glyphs;
font-size: 50px;
@ -452,6 +462,10 @@ input[type=checkbox]:checked + label:hover {
min-height: 29px;
}
.property.checkbox {
width: auto;
}
.property label {
display: table-cell;
vertical-align: middle;
@ -657,7 +671,7 @@ div.refresh input[type="button"] {
padding-left: 25px;
}
.pyr .tuple input {
padding-left: 45px;
padding-left: 40px;
}
.tuple div > label:first-child {
@ -779,6 +793,7 @@ textarea:enabled[scrolling="true"]::-webkit-resizer {
width: 100%;
overflow-x: hidden;
overflow-y: auto;
box-sizing: border-box;
padding-top: 28px; /* Space for header and footer outside of scroll region. */
margin-top: 28px;
border-left: 2px solid #575757;
@ -797,6 +812,7 @@ textarea:enabled[scrolling="true"]::-webkit-resizer {
}
#entity-table thead {
box-sizing: border-box;
border: 2px solid #575757;
border-top-left-radius: 7px;
border-top-right-radius: 7px;
@ -804,6 +820,7 @@ textarea:enabled[scrolling="true"]::-webkit-resizer {
}
#entity-table tfoot {
box-sizing: border-box;
border: 2px solid #575757;
border-bottom-left-radius: 7px;
border-bottom-right-radius: 7px;

View file

@ -254,7 +254,7 @@
function resize() {
// Take up available window space
elEntityTableScroll.style.height = window.innerHeight - 232;
elEntityTableScroll.style.height = window.innerHeight - 200;
// Update the widths of the header cells to match the body
var tds = document.querySelectorAll("#entity-table-body tr:first-child td");

View file

@ -467,37 +467,6 @@
var elZTextureURL = document.getElementById("property-z-texture-url");
var elPreviewCameraButton = document.getElementById("preview-camera-button");
var urlUpdaters = document.getElementsByClassName("update-url-version");
var PARAM_REGEXP = /(?:\?)(\S+)/; // Check if this has any parameters.
var TIMESTAMP_REGEXP = /(&?HFTime=\d+)/;
var refreshEvent = function (event) {
var urlElement = event.target.parentElement.getElementsByTagName("INPUT")[0];
var content = urlElement.value;
var date = new Date();
var timeStamp = date.getTime();
if(content.length > 0){
if(PARAM_REGEXP.test(content)){
// Has params, so lets remove existing definition and append again.
content = content.replace(TIMESTAMP_REGEXP,"") + "&";
}else{
content += "?";
}
content = content.replace("?&","?");
urlElement.value = content + "HFTime=" + timeStamp;
}
var evt = document.createEvent("HTMLEvents");
evt.initEvent("change", true, true );
urlElement.dispatchEvent(evt);
};
for(var index = 0; index < urlUpdaters.length; index++){
var urlUpdater = urlUpdaters[index];
urlUpdater.addEventListener("click", refreshEvent, true);
}
if (window.EventBridge !== undefined) {
var properties;
@ -1397,20 +1366,17 @@
<option value="3">Edged marching cubes</option>
</select>
</div>
<div class="spatial-group poly-vox-section property url refresh">
<div class="spatial-group poly-vox-section property url ">
<label for="property-x-texture-url">X-axis texture URL</label>
<input type="text" id="property-x-texture-url">
<input type="button" class="update-url-version glyph" value="F" />
</div>
<div class="spatial-group poly-vox-section property url refresh">
<div class="spatial-group poly-vox-section property url ">
<label for="property-y-texture-url">Y-axis texture URL</label>
<input type="text" id="property-y-texture-url">
<input type="button" class="update-url-version glyph" value="F" />
</div>
<div class="spatial-group poly-vox-section property url refresh">
<div class="spatial-group poly-vox-section property url ">
<label for="property-z-texture-url">Z-axis texture URL</label>
<input type="text" id="property-z-texture-url">
<input type="button" class="update-url-version glyph" value="F" />
</div>
<div class="spatial-group property pyr">
<label>Rotation</label>
@ -1545,16 +1511,15 @@
</div>
</div>
<div class="behavior-group property url refresh">
<div class="behavior-group property url ">
<label for="property-collision-sound-url">Collision sound URL</label>
<input type="text" id="property-collision-sound-url">
<input type="button" class="update-url-version glyph" value="F" />
</div>
<div class="behavior-group property number">
<label>Lifetime</label>
<input type="number" id="property-lifetime">
</div>
<div class="behavior-group property url refresh">
<div class="behavior-group property url ">
<!--
FIXME: If reload buttons at the end of each URL continue to work OK during beta, this reload button and associated
code should be removed.
@ -1563,17 +1528,15 @@
-->
<label for="property-script-url">Script URL</label>
<input type="text" id="property-script-url">
<input type="button" class="update-url-version glyph" value="F" />
</div>
<div class="section-header model-group model-section zone-section">
<label>Model</label><span>M</span>
</div>
<div class="model-group model-section property url refresh">
<div class="model-group model-section property url ">
<label for="property-model-url">Model URL</label>
<input type="text" id="property-model-url">
<input type="button" class="update-url-version glyph" value="F" />
</div>
<div class="model-group model-section zone-section property dropdown">
<label>Collision shape type</label>
@ -1584,15 +1547,13 @@
<option value="compound">Compound</option>
</select>
</div>
<div class="model-group model-section zone-section property url refresh">
<div class="model-group model-section zone-section property url ">
<label for="property-compound-shape-url">Compound shape URL</label>
<input type="text" id="property-compound-shape-url">
<input type="button" class="update-url-version glyph" value="F" />
</div>
<div class="model-group model-section property url refresh">
<div class="model-group model-section property url ">
<label for="property-model-animation-url">Animation URL</label>
<input type="text" id="property-model-animation-url">
<input type="button" class="update-url-version glyph" value="F" />
</div>
<div class="model-group model-section property checkbox">
<input type="checkbox" id="property-model-animation-playing">
@ -1693,10 +1654,9 @@
<label>Ambient intensity</label>
<input type="number" id="property-zone-key-ambient-intensity" min="0" max="10" step="0.1">
</div>
<div class="zone-group zone-section keylight-section property url refresh">
<div class="zone-group zone-section keylight-section property url ">
<label for="property-zone-key-ambient-url">Ambient URL</label>
<input type="text" id="property-zone-key-ambient-url">
<input type="button" class="update-url-version glyph" value="F" />
</div>
<div class="sub-section-header zone-group zone-section stage-section">
@ -1745,20 +1705,18 @@
<div><input type="number" class="blue" id="property-zone-skybox-color-blue"><label for="property-zone-skybox-color-blue">Blue:</label></div>
</div>
</div>
<div class="zone-group zone-section skybox-section property url refresh">
<div class="zone-group zone-section skybox-section property url ">
<label for="property-zone-skybox-url">Skybox URL</label>
<input type="text" id="property-zone-skybox-url">
<input type="button" class="update-url-version glyph" value="F" />
</div>
<div class="section-header web-group web-section">
<label>Web</label><span>M</span>
</div>
<div class="web-group web-section property url refresh">
<div class="web-group web-section property url ">
<label for="property-web-source-url">Source URL</label>
<input type="text" id="property-web-source-url">
<input type="button" class="update-url-version glyph" value="F" />
</div>

21
examples/utilities/cache/cacheStats.js vendored Normal file
View file

@ -0,0 +1,21 @@
//
// cacheStats.js
// examples/utilities/cache
//
// Zach Pomerantz, created on 4/1/2016.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
// Set up the qml ui
var qml = Script.resolvePath('stats.qml');
var window = new OverlayWindow({
title: 'Cache Stats',
source: qml,
width: 300,
height: 200
});
window.setPosition(500, 50);
window.closed.connect(function() { Script.stop(); });

77
examples/utilities/cache/stats.qml vendored Normal file
View file

@ -0,0 +1,77 @@
//
// stats.qml
// examples/utilities/cache
//
// Created by Zach Pomerantz on 4/1/2016
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or https://www.apache.org/licenses/LICENSE-2.0.html
//
import QtQuick 2.5
import QtQuick.Controls 1.4
import "../lib/plotperf"
Item {
id: root
anchors.fill: parent
property var caches: [["Animation", AnimationCache], ["Model", ModelCache], ["Texture", TextureCache], ["Sound", SoundCache]]
Grid {
id: grid
rows: root.caches.length; columns: 1; spacing: 8
anchors.fill: parent
Repeater {
id: repeater
model: root.caches
Row {
PlotPerf {
title: modelData[0] + " Count"
anchors.left: parent
height: (grid.height - (grid.spacing * (root.caches.length + 1))) / root.caches.length
width: grid.width / 2 - grid.spacing * 1.5
object: modelData[1]
valueNumDigits: "1"
plots: [
{
prop: "numTotal",
label: "total",
color: "#00B4EF"
},
{
prop: "numCached",
label: "cached",
color: "#1AC567"
}
]
}
PlotPerf {
title: modelData[0] + " Size"
anchors.right: parent
height: (grid.height - (grid.spacing * (root.caches.length + 1))) / root.caches.length
width: grid.width / 2 - grid.spacing * 1.5
object: modelData[1]
valueScale: 1048576
valueUnit: "Mb"
valueNumDigits: "1"
plots: [
{
prop: "sizeTotal",
label: "total",
color: "#00B4EF"
},
{
prop: "sizeCached",
label: "cached",
color: "#1AC567"
}
]
}
}
}
}
}

View file

@ -19,12 +19,8 @@ Item {
// The title of the graph
property string title
// THe object used as the default source object for the prop plots
// The object used as the default source object for the prop plots
property var object
// THis is my hack to get a property and assign it to a trigger var in order to get
// a signal called whenever the value changed
property var trigger
// Plots is an array of plot descriptor
// a default plot descriptor expects the following object:
@ -55,45 +51,38 @@ Item {
property var tick : 0
function createValues() {
print("trigger is: " + JSON.stringify(trigger))
if (Array.isArray(plots)) {
for (var i =0; i < plots.length; i++) {
var plot = plots[i];
print(" a pnew Plot:" + JSON.stringify(plot));
_values.push( {
object: (plot["object"] !== undefined ? plot["object"] : root.object),
value: plot["prop"],
valueMax: 1,
numSamplesConstantMax: 0,
valueHistory: new Array(),
label: (plot["label"] !== undefined ? plot["label"] : ""),
color: (plot["color"] !== undefined ? plot["color"] : "white"),
scale: (plot["scale"] !== undefined ? plot["scale"] : 1),
unit: (plot["unit"] !== undefined ? plot["unit"] : valueUnit)
})
}
for (var i =0; i < plots.length; i++) {
var plot = plots[i];
_values.push( {
object: (plot["object"] !== undefined ? plot["object"] : root.object),
value: plot["prop"],
valueMax: 1,
numSamplesConstantMax: 0,
valueHistory: new Array(),
label: (plot["label"] !== undefined ? plot["label"] : ""),
color: (plot["color"] !== undefined ? plot["color"] : "white"),
scale: (plot["scale"] !== undefined ? plot["scale"] : 1),
unit: (plot["unit"] !== undefined ? plot["unit"] : valueUnit)
})
}
print("in creator" + JSON.stringify(_values));
pullFreshValues();
}
Component.onCompleted: {
createValues();
print(JSON.stringify(_values));
}
function pullFreshValues() {
//print("pullFreshValues");
// Wait until values are created to begin pulling
if (!_values) { return; }
var VALUE_HISTORY_SIZE = 100;
var UPDATE_CANVAS_RATE = 20;
tick++;
var currentValueMax = 0
for (var i = 0; i < _values.length; i++) {
var currentVal = _values[i].object[_values[i].value] * _values[i].scale;
var currentVal = (+_values[i].object[_values[i].value]) * _values[i].scale;
_values[i].valueHistory.push(currentVal)
_values[i].numSamplesConstantMax++;
@ -125,11 +114,13 @@ Item {
valueMax = currentValueMax;
}
if (tick % UPDATE_CANVAS_RATE == 0) {
mycanvas.requestPaint()
}
mycanvas.requestPaint()
}
Timer {
interval: 100; running: true; repeat: true
onTriggered: pullFreshValues()
}
onTriggerChanged: pullFreshValues()
Canvas {
id: mycanvas
@ -165,9 +156,9 @@ Item {
ctx.fillStyle = val.color;
var bestValue = val.valueHistory[val.valueHistory.length -1];
ctx.textAlign = "right";
ctx.fillText(displayValue(bestValue, val.unit), width, (num + 2) * lineHeight * 1.5);
ctx.fillText(displayValue(bestValue, val.unit), width, (num + 2) * lineHeight * 1);
ctx.textAlign = "left";
ctx.fillText(val.label, 0, (num + 2) * lineHeight * 1.5);
ctx.fillText(val.label, 0, (num + 2) * lineHeight * 1);
}
function displayTitle(ctx, text, maxVal) {

View file

@ -10,7 +10,7 @@
//
import QtQuick 2.5
import QtQuick.Controls 1.4
import "plotperf"
import "../lib/plotperf"
Item {
id: statsUI
@ -32,7 +32,6 @@ Item {
title: "Num Buffers"
height: parent.evalEvenHeight()
object: stats.config
trigger: stats.config["bufferCPUCount"]
plots: [
{
prop: "bufferCPUCount",
@ -50,7 +49,6 @@ Item {
title: "gpu::Buffer Memory"
height: parent.evalEvenHeight()
object: stats.config
trigger: stats.config["bufferCPUMemoryUsage"]
valueScale: 1048576
valueUnit: "Mb"
valueNumDigits: "1"
@ -71,7 +69,6 @@ Item {
title: "Num Textures"
height: parent.evalEvenHeight()
object: stats.config
trigger: stats.config["textureCPUCount"]
plots: [
{
prop: "textureCPUCount",
@ -94,7 +91,6 @@ Item {
title: "gpu::Texture Memory"
height: parent.evalEvenHeight()
object: stats.config
trigger: stats.config["textureCPUMemoryUsage"]
valueScale: 1048576
valueUnit: "Mb"
valueNumDigits: "1"
@ -116,7 +112,6 @@ Item {
title: "Triangles"
height: parent.evalEvenHeight()
object: stats.config
trigger: stats.config["frameTriangleCount"]
valueScale: 1000
valueUnit: "K"
plots: [
@ -138,7 +133,6 @@ Item {
title: "Drawcalls"
height: parent.evalEvenHeight()
object: stats.config
trigger: stats.config["frameDrawcallCount"]
plots: [
{
prop: "frameAPIDrawcallCount",
@ -168,7 +162,6 @@ Item {
title: "Items"
height: parent.evalEvenHeight()
object: parent.drawOpaqueConfig
trigger: Render.getConfig("DrawOpaqueDeferred")["numDrawn"]
plots: [
{
object: Render.getConfig("DrawOpaqueDeferred"),

View file

@ -7,7 +7,8 @@ Script.include([
]);
var isActive = false;
var toolIconUrl = "http://headache.hungry.com/~seth/hifi/";
var toolIconUrl = "https://s3-us-west-1.amazonaws.com/hifi-content/seth/production/icons/"
var toolHeight = 50;
var toolWidth = 50;

View file

@ -53,6 +53,10 @@ IceServer::IceServer(int argc, char* argv[]) :
QTimer* inactivePeerTimer = new QTimer(this);
connect(inactivePeerTimer, &QTimer::timeout, this, &IceServer::clearInactivePeers);
inactivePeerTimer->start(CLEAR_INACTIVE_PEERS_INTERVAL_MSECS);
// handle public keys when they arrive from the QNetworkAccessManager
auto& networkAccessManager = NetworkAccessManager::getInstance();
connect(&networkAccessManager, &QNetworkAccessManager::finished, this, &IceServer::publicKeyReplyFinished);
}
bool IceServer::packetVersionMatch(const udt::Packet& packet) {
@ -62,8 +66,6 @@ bool IceServer::packetVersionMatch(const udt::Packet& packet) {
if (headerVersion == versionForPacketType(headerType)) {
return true;
} else {
qDebug() << "Packet version mismatch for packet" << headerType << " from" << packet.getSenderSockAddr();
return false;
}
}
@ -202,7 +204,6 @@ bool IceServer::isVerifiedHeartbeat(const QUuid& domainID, const QByteArray& pla
void IceServer::requestDomainPublicKey(const QUuid& domainID) {
// send a request to the metaverse API for the public key for this domain
auto& networkAccessManager = NetworkAccessManager::getInstance();
connect(&networkAccessManager, &QNetworkAccessManager::finished, this, &IceServer::publicKeyReplyFinished);
QUrl publicKeyURL { NetworkingConstants::METAVERSE_SERVER_URL };
QString publicKeyPath = QString("/api/v1/domains/%1/public_key").arg(uuidStringWithoutCurlyBraces(domainID));

View file

@ -107,12 +107,15 @@ elseif(WIN32)
# add an executable that also has the icon itself and the configured rc file as resources
add_executable(${TARGET_NAME} WIN32 ${INTERFACE_SRCS} ${QM} ${CONFIGURE_ICON_RC_OUTPUT})
add_custom_command(
TARGET ${TARGET_NAME}
POST_BUILD
COMMAND "mt.exe" -manifest "${CMAKE_CURRENT_SOURCE_DIR}/interface.exe.manifest" -inputresource:"$<TARGET_FILE:${TARGET_NAME}>"\;\#1 -outputresource:"$<TARGET_FILE:${TARGET_NAME}>"\;\#1
COMMENT "Adding OS version support manifest to exe"
)
if ( NOT DEV_BUILD )
add_custom_command(
TARGET ${TARGET_NAME}
POST_BUILD
COMMAND "mt.exe" -manifest "${CMAKE_CURRENT_SOURCE_DIR}/interface.exe.manifest" -inputresource:"$<TARGET_FILE:${TARGET_NAME}>"\;\#1 -outputresource:"$<TARGET_FILE:${TARGET_NAME}>"\;\#1
COMMENT "Adding OS version support manifest to exe"
)
endif()
else()
add_executable(${TARGET_NAME} ${INTERFACE_SRCS} ${QM})
endif()

View file

@ -0,0 +1,13 @@
{
"name": "Oculus Remote to Standard",
"channels": [
{ "from": "OculusRemote.Start", "to": "Actions.UiNavSelect" },
{ "from": "OculusRemote.Back", "to": "Actions.UiNavBack" },
{ "from": "OculusRemote.DU", "to": "Standard.DU" },
{ "from": "OculusRemote.DD", "to": "Standard.DD" },
{ "from": "OculusRemote.DL", "to": "Standard.DL" },
{ "from": "OculusRemote.DR", "to": "Standard.DR" }
]
}

View file

@ -0,0 +1,23 @@
{
"name": "Oculus Touch to Standard",
"channels": [
{ "from": "OculusTouch.LY", "filters": "invert", "to": "Standard.LY" },
{ "from": "OculusTouch.LX", "to": "Standard.LX" },
{ "from": "OculusTouch.LT", "to": "Standard.LT" },
{ "from": "OculusTouch.RY", "filters": "invert", "to": "Standard.RY" },
{ "from": "OculusTouch.RX", "to": "Standard.RX" },
{ "from": "OculusTouch.RT", "to": "Standard.RT" },
{ "from": "OculusTouch.RB", "to": "Standard.RB" },
{ "from": "OculusTouch.RS", "to": "Standard.RS" },
{ "from": "OculusTouch.LeftApplicationMenu", "to": "Standard.Back" },
{ "from": "OculusTouch.RightApplicationMenu", "to": "Standard.Start" },
{ "from": "OculusTouch.LeftHand", "to": "Standard.LeftHand" },
{ "from": "OculusTouch.RightHand", "to": "Standard.RightHand" }
]
}
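These JSON files are static route tables; equivalent routes can also be built at runtime from a script, which is a convenient way to test a mapping before committing a JSON file. A hedged sketch using the Controller scripting API (the routes chosen are illustrative, and Controller.Hardware.OculusTouch is only present while that input plugin is active):
```
// Illustrative only: build a couple of Oculus-Touch-style routes at runtime.
var MAPPING_NAME = "com.example.test.oculusTouch";
var mapping = Controller.newMapping(MAPPING_NAME);

// invert LY before forwarding it, mirroring the "filters": "invert" entry above
mapping.from(Controller.Hardware.OculusTouch.LY).invert().to(Controller.Standard.LY);
mapping.from(Controller.Hardware.OculusTouch.LX).to(Controller.Standard.LX);

Controller.enableMapping(MAPPING_NAME);

Script.scriptEnding.connect(function() {
    Controller.disableMapping(MAPPING_NAME);
});
```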

View file

@ -2,9 +2,6 @@
"name": "Standard to Action",
"when": "Application.NavigationFocused",
"channels": [
{ "disabled_from": { "makeAxis" : [ "Standard.DD", "Standard.DU" ] }, "to": "Actions.UiNavVertical" },
{ "disabled_from": { "makeAxis" : [ "Standard.DL", "Standard.DR" ] }, "to": "Actions.UiNavLateral" },
{ "disabled_from": { "makeAxis" : [ "Standard.LB", "Standard.RB" ] }, "to": "Actions.UiNavGroup" },
{ "from": "Standard.DU", "to": "Actions.UiNavVertical" },
{ "from": "Standard.DD", "to": "Actions.UiNavVertical", "filters": "invert" },
{ "from": "Standard.DL", "to": "Actions.UiNavLateral", "filters": "invert" },

View file

@ -30,7 +30,7 @@ Window {
title: "Edit"
property alias tabView: tabView
implicitWidth: 520; implicitHeight: 695
minSize: Qt.vector2d(400, 500)
minSize: Qt.vector2d(412, 500)
HifiConstants { id: hifi }

View file

@ -5,6 +5,8 @@ WebEngineView {
id: root
property var newUrl;
profile.httpUserAgent: "Mozilla/5.0 Chrome (HighFidelityInterface)"
Component.onCompleted: {
console.log("Connecting JS messaging to Hifi Logging")
// Ensure the JS from the web-engine makes it to our logging

View file

@ -21,7 +21,7 @@ Desktop {
Component.onCompleted: {
WebEngine.settings.javascriptCanOpenWindows = true;
WebEngine.settings.javascriptCanAccessClipboard = false;
WebEngine.settings.spatialNavigationEnabled = true;
WebEngine.settings.spatialNavigationEnabled = false;
WebEngine.settings.localContentCanAccessRemoteUrls = true;
}

File diff suppressed because it is too large.

View file

@ -135,14 +135,14 @@ public:
const ViewFrustum* getDisplayViewFrustum() const;
ViewFrustum* getShadowViewFrustum() override { return &_shadowViewFrustum; }
const OctreePacketProcessor& getOctreePacketProcessor() const { return _octreeProcessor; }
EntityTreeRenderer* getEntities() { return DependencyManager::get<EntityTreeRenderer>().data(); }
EntityTreeRenderer* getEntities() const { return DependencyManager::get<EntityTreeRenderer>().data(); }
QUndoStack* getUndoStack() { return &_undoStack; }
MainWindow* getWindow() { return _window; }
EntityTreePointer getEntityClipboard() { return _entityClipboard; }
MainWindow* getWindow() const { return _window; }
EntityTreePointer getEntityClipboard() const { return _entityClipboard; }
EntityTreeRenderer* getEntityClipboardRenderer() { return &_entityClipboardRenderer; }
EntityEditPacketSender* getEntityEditPacketSender() { return &_entityEditSender; }
ivec2 getMouse();
ivec2 getMouse() const;
FaceTracker* getActiveFaceTracker();
FaceTracker* getSelectedFaceTracker();
@ -155,7 +155,7 @@ public:
bool isForeground() const { return _isForeground; }
uint32_t getFrameCount() { return _frameCount; }
uint32_t getFrameCount() const { return _frameCount; }
float getFps() const { return _fps; }
float getTargetFrameRate(); // frames/second
float getLastInstanteousFps() const { return _lastInstantaneousFps; }
@ -179,7 +179,7 @@ public:
DisplayPlugin* getActiveDisplayPlugin();
const DisplayPlugin* getActiveDisplayPlugin() const;
FileLogger* getLogger() { return _logger; }
FileLogger* getLogger() const { return _logger; }
glm::vec2 getViewportDimensions() const;
@ -189,7 +189,7 @@ public:
bool isAboutToQuit() const { return _aboutToQuit; }
// the isHMDmode is true whenever we use the interface from an HMD and not a standard flat display
// the isHMDMode is true whenever we use the interface from an HMD and not a standard flat display
// rendering of several elements depend on that
// TODO: carry that information on the Camera as a setting
bool isHMDMode() const;
@ -197,14 +197,14 @@ public:
glm::mat4 getEyeOffset(int eye) const;
glm::mat4 getEyeProjection(int eye) const;
QRect getDesirableApplicationGeometry();
QRect getDesirableApplicationGeometry() const;
Bookmarks* getBookmarks() const { return _bookmarks; }
virtual bool canAcceptURL(const QString& url) const override;
virtual bool acceptURL(const QString& url, bool defaultUpload = false) override;
void setMaxOctreePacketsPerSecond(int maxOctreePPS);
int getMaxOctreePacketsPerSecond();
int getMaxOctreePacketsPerSecond() const;
render::ScenePointer getMain3DScene() override { return _main3DScene; }
render::ScenePointer getMain3DScene() const { return _main3DScene; }
@ -239,22 +239,23 @@ public slots:
bool exportEntities(const QString& filename, float x, float y, float z, float scale);
bool importEntities(const QString& url);
void setLowVelocityFilter(bool lowVelocityFilter);
static void setLowVelocityFilter(bool lowVelocityFilter);
Q_INVOKABLE void loadDialog();
Q_INVOKABLE void loadScriptURLDialog();
Q_INVOKABLE void loadScriptURLDialog() const;
void toggleLogDialog();
void toggleRunningScriptsWidget();
void toggleRunningScriptsWidget() const;
void toggleAssetServerWidget(QString filePath = "");
void handleLocalServerConnection();
void readArgumentsFromLocalSocket();
void handleLocalServerConnection() const;
void readArgumentsFromLocalSocket() const;
void packageModel();
static void packageModel();
void openUrl(const QUrl& url);
void openUrl(const QUrl& url) const;
void resetSensors(bool andReload = false);
void setActiveFaceTracker();
void setActiveFaceTracker() const;
void toggleSuppressDeadlockWatchdogStatus(bool checked);
#ifdef HAVE_IVIEWHMD
void setActiveEyeTracker();
@ -264,7 +265,7 @@ public slots:
#endif
void aboutApp();
void showHelp();
static void showHelp();
void cycleCamera();
void cameraMenuChanged();
@ -273,14 +274,14 @@ public slots:
void reloadResourceCaches();
void updateHeartbeat();
void updateHeartbeat() const;
void crashApplication();
void deadlockApplication();
static void crashApplication();
static void deadlockApplication();
void rotationModeChanged();
void rotationModeChanged() const;
void runTests();
static void runTests();
private slots:
void showDesktop();
@ -290,7 +291,7 @@ private slots:
void resettingDomain();
void audioMuteToggled();
void audioMuteToggled() const;
void faceTrackerMuteToggled();
void activeChanged(Qt::ApplicationState state);
@ -298,7 +299,7 @@ private slots:
void notifyPacketVersionMismatch();
void loadSettings();
void saveSettings();
void saveSettings() const;
bool acceptSnapshot(const QString& urlString);
bool askToSetAvatarUrl(const QString& url);
@ -308,18 +309,18 @@ private slots:
void displayAvatarAttachmentWarning(const QString& message) const;
bool displayAvatarAttachmentConfirmationDialog(const QString& name) const;
void setSessionUUID(const QUuid& sessionUUID);
void setSessionUUID(const QUuid& sessionUUID) const;
void domainChanged(const QString& domainHostname);
void updateWindowTitle();
void nodeAdded(SharedNodePointer node);
void updateWindowTitle() const;
void nodeAdded(SharedNodePointer node) const;
void nodeActivated(SharedNodePointer node);
void nodeKilled(SharedNodePointer node);
void packetSent(quint64 length);
static void packetSent(quint64 length);
void updateDisplayMode();
void updateInputModes();
private:
void initDisplay();
static void initDisplay();
void init();
void cleanupBeforeQuit();
@ -327,14 +328,14 @@ private:
void update(float deltaTime);
// Various helper functions called during update()
void updateLOD();
void updateLOD() const;
void updateThreads(float deltaTime);
void updateDialogs(float deltaTime);
void updateDialogs(float deltaTime) const;
void queryOctree(NodeType_t serverType, PacketType packetType, NodeToJurisdictionMap& jurisdictions);
void loadViewFrustum(Camera& camera, ViewFrustum& viewFrustum);
void queryOctree(NodeType_t serverType, PacketType packetType, NodeToJurisdictionMap& jurisdictions, bool forceResend = false);
static void loadViewFrustum(Camera& camera, ViewFrustum& viewFrustum);
glm::vec3 getSunDirection();
glm::vec3 getSunDirection() const;
void renderRearViewMirror(RenderArgs* renderArgs, const QRect& region);
@ -344,7 +345,7 @@ private:
MyAvatar* getMyAvatar() const;
void checkSkeleton();
void checkSkeleton() const;
void initializeAcceptedFiles();
@ -366,18 +367,18 @@ private:
void mouseMoveEvent(QMouseEvent* event);
void mousePressEvent(QMouseEvent* event);
void mouseDoublePressEvent(QMouseEvent* event);
void mouseDoublePressEvent(QMouseEvent* event) const;
void mouseReleaseEvent(QMouseEvent* event);
void touchBeginEvent(QTouchEvent* event);
void touchEndEvent(QTouchEvent* event);
void touchUpdateEvent(QTouchEvent* event);
void wheelEvent(QWheelEvent* event);
void wheelEvent(QWheelEvent* event) const;
void dropEvent(QDropEvent* event);
void dragEnterEvent(QDragEnterEvent* event);
static void dragEnterEvent(QDragEnterEvent* event);
void maybeToggleMenuVisible(QMouseEvent* event);
void maybeToggleMenuVisible(QMouseEvent* event) const;
MainWindow* _window;
QElapsedTimer& _sessionRunTimer;
@ -423,7 +424,7 @@ private:
int _avatarSimsPerSecondReport {0};
quint64 _lastAvatarSimsPerSecondUpdate {0};
Camera _myCamera; // My view onto the world
Camera _mirrorCamera; // Cammera for mirror view
Camera _mirrorCamera; // Camera for mirror view
QRect _mirrorViewRect;
Setting::Handle<QString> _previousScriptLocation;
@ -518,6 +519,12 @@ private:
std::mutex _preRenderLambdasLock;
std::atomic<uint32_t> _processOctreeStatsCounter { 0 };
bool _keyboardDeviceHasFocus { true };
bool _recentlyClearedDomain { false };
QString _returnFromFullScreenMirrorTo;
};
#endif // hifi_Application_h

View file

@ -63,6 +63,11 @@ bool Bookmarks::contains(const QString& name) const {
void Bookmarks::readFromFile() {
QFile loadFile(_bookmarksFilename);
if (!loadFile.exists()) {
// User has not yet saved bookmarks
return;
}
if (!loadFile.open(QIODevice::ReadOnly)) {
qWarning("Couldn't open bookmarks file for reading");
return;

View file

@ -219,12 +219,12 @@ Menu::Menu() {
// View > First Person
cameraModeGroup->addAction(addCheckableActionToQMenuAndActionHash(viewMenu,
MenuOption::FirstPerson, 0, // QML Qt:: Key_P
false, qApp, SLOT(cameraMenuChanged())));
true, qApp, SLOT(cameraMenuChanged())));
// View > Third Person
cameraModeGroup->addAction(addCheckableActionToQMenuAndActionHash(viewMenu,
MenuOption::ThirdPerson, 0,
true, qApp, SLOT(cameraMenuChanged())));
false, qApp, SLOT(cameraMenuChanged())));
// View > Mirror
cameraModeGroup->addAction(addCheckableActionToQMenuAndActionHash(viewMenu,
@ -530,6 +530,9 @@ Menu::Menu() {
addCheckableActionToQMenuAndActionHash(timingMenu, MenuOption::PipelineWarnings);
addCheckableActionToQMenuAndActionHash(timingMenu, MenuOption::LogExtraTimings);
addCheckableActionToQMenuAndActionHash(timingMenu, MenuOption::SuppressShortTimings);
addCheckableActionToQMenuAndActionHash(timingMenu, MenuOption::SupressDeadlockWatchdogStatus, 0, false,
qApp, SLOT(toggleSuppressDeadlockWatchdogStatus(bool)));
// Developer > Audio >>>
MenuWrapper* audioDebugMenu = developerMenu->addMenu("Audio");

View file

@ -162,6 +162,7 @@ namespace MenuOption {
const QString Stats = "Stats";
const QString StopAllScripts = "Stop All Scripts";
const QString SuppressShortTimings = "Suppress Timings Less than 10ms";
const QString SupressDeadlockWatchdogStatus = "Supress Deadlock Watchdog Status";
const QString ThirdPerson = "Third Person";
const QString ThreePointCalibration = "3 Point Calibration";
const QString ThrottleFPSIfNotFocus = "Throttle FPS If Not Focus"; // FIXME - this value duplicated in Basic2DWindowOpenGLDisplayPlugin.cpp

View file

@ -32,8 +32,8 @@ AudioInjector::AudioInjector(QObject* parent) :
}
AudioInjector::AudioInjector(Sound* sound, const AudioInjectorOptions& injectorOptions) :
_audioData(sound->getByteArray()),
AudioInjector::AudioInjector(const Sound& sound, const AudioInjectorOptions& injectorOptions) :
_audioData(sound.getByteArray()),
_options(injectorOptions)
{

View file

@ -45,7 +45,7 @@ public:
};
AudioInjector(QObject* parent);
AudioInjector(Sound* sound, const AudioInjectorOptions& injectorOptions);
AudioInjector(const Sound& sound, const AudioInjectorOptions& injectorOptions);
AudioInjector(const QByteArray& audioData, const AudioInjectorOptions& injectorOptions);
bool isFinished() const { return _state == State::Finished; }

View file

@ -23,13 +23,15 @@
#include "AudioRingBuffer.h"
static const QString RING_BUFFER_OVERFLOW_DEBUG { "AudioRingBuffer::writeData has overflown the buffer. Overwriting old data." };
AudioRingBuffer::AudioRingBuffer(int numFrameSamples, bool randomAccessMode, int numFramesCapacity) :
_frameCapacity(numFramesCapacity),
_sampleCapacity(numFrameSamples * numFramesCapacity),
_bufferLength(numFrameSamples * (numFramesCapacity + 1)),
_numFrameSamples(numFrameSamples),
_randomAccessMode(randomAccessMode),
_overflowCount(0)
_frameCapacity(numFramesCapacity),
_sampleCapacity(numFrameSamples * numFramesCapacity),
_bufferLength(numFrameSamples * (numFramesCapacity + 1)),
_numFrameSamples(numFrameSamples),
_randomAccessMode(randomAccessMode),
_overflowCount(0)
{
if (numFrameSamples) {
_buffer = new int16_t[_bufferLength];
@ -41,6 +43,8 @@ _overflowCount(0)
_nextOutput = NULL;
_endOfLastWrite = NULL;
}
static QString repeatedMessage = LogHandler::getInstance().addRepeatedMessageRegex(RING_BUFFER_OVERFLOW_DEBUG);
};
AudioRingBuffer::~AudioRingBuffer() {
@ -131,8 +135,6 @@ int AudioRingBuffer::writeData(const char* data, int maxSize) {
_nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, samplesToDelete);
_overflowCount++;
const QString RING_BUFFER_OVERFLOW_DEBUG { "AudioRingBuffer::writeData has overflown the buffer. Overwriting old data." };
static QString repeatedMessage = LogHandler::getInstance().addRepeatedMessageRegex(RING_BUFFER_OVERFLOW_DEBUG);
qCDebug(audio) << qPrintable(RING_BUFFER_OVERFLOW_DEBUG);
}
@ -179,7 +181,12 @@ int AudioRingBuffer::addSilentSamples(int silentSamples) {
if (silentSamples > samplesRoomFor) {
// there's not enough room for this write. write as many silent samples as we have room for
silentSamples = samplesRoomFor;
qCDebug(audio) << "Dropping some silent samples to prevent ring buffer overflow";
static const QString DROPPED_SILENT_DEBUG {
"AudioRingBuffer::addSilentSamples dropping silent samples to prevent overflow."
};
static QString repeatedMessage = LogHandler::getInstance().addRepeatedMessageRegex(DROPPED_SILENT_DEBUG);
qCDebug(audio) << qPrintable(DROPPED_SILENT_DEBUG);
}
// memset zeroes into the buffer, accommodate a wrap around the end
@ -243,7 +250,7 @@ int AudioRingBuffer::writeSamples(ConstIterator source, int maxSamples) {
int samplesToDelete = samplesToCopy - samplesRoomFor;
_nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, samplesToDelete);
_overflowCount++;
qCDebug(audio) << "Overflowed ring buffer! Overwriting old data";
qCDebug(audio) << qPrintable(RING_BUFFER_OVERFLOW_DEBUG);
}
int16_t* bufferLast = _buffer + _bufferLength - 1;
@ -264,7 +271,7 @@ int AudioRingBuffer::writeSamplesWithFade(ConstIterator source, int maxSamples,
int samplesToDelete = samplesToCopy - samplesRoomFor;
_nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, samplesToDelete);
_overflowCount++;
qCDebug(audio) << "Overflowed ring buffer! Overwriting old data";
qCDebug(audio) << qPrintable(RING_BUFFER_OVERFLOW_DEBUG);
}
int16_t* bufferLast = _buffer + _bufferLength - 1;
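
The hunks above route the overflow and dropped-silent-samples warnings through strings that are registered once with LogHandler, so repeated occurrences can be collapsed instead of flooding the log. A minimal sketch of that idiom, reusing only the LogHandler and qCDebug(audio) calls already visible in this diff (the ExampleBuffer class is hypothetical):

```
#include <QString>
#include <LogHandler.h>    // shared hifi helper providing addRepeatedMessageRegex()
#include "AudioLogging.h"  // declares the "audio" logging category used below

static const QString RING_BUFFER_OVERFLOW_DEBUG {
    "AudioRingBuffer::writeData has overflown the buffer. Overwriting old data."
};

class ExampleBuffer {
public:
    ExampleBuffer() {
        // Function-local static: the regex is registered exactly once, no matter
        // how many buffers are constructed.
        static QString repeatedMessage =
            LogHandler::getInstance().addRepeatedMessageRegex(RING_BUFFER_OVERFLOW_DEBUG);
        Q_UNUSED(repeatedMessage);
    }

    void onOverflow() {
        // Every overflow site prints the exact registered string so LogHandler
        // can recognize and summarize the repeats.
        qCDebug(audio) << qPrintable(RING_BUFFER_OVERFLOW_DEBUG);
    }
};
```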

View file

@ -27,22 +27,18 @@
#include "AudioLogging.h"
#include "Sound.h"
static int soundMetaTypeId = qRegisterMetaType<Sound*>();
QScriptValue soundSharedPointerToScriptValue(QScriptEngine* engine, SharedSoundPointer const& in) {
return engine->newQObject(in.data());
QScriptValue soundSharedPointerToScriptValue(QScriptEngine* engine, const SharedSoundPointer& in) {
return engine->newQObject(new SoundScriptingInterface(in), QScriptEngine::ScriptOwnership);
}
void soundSharedPointerFromScriptValue(const QScriptValue& object, SharedSoundPointer &out) {
out = SharedSoundPointer(qobject_cast<Sound*>(object.toQObject()));
void soundSharedPointerFromScriptValue(const QScriptValue& object, SharedSoundPointer& out) {
if (auto soundInterface = qobject_cast<SoundScriptingInterface*>(object.toQObject())) {
out = soundInterface->getSound();
}
}
QScriptValue soundPointerToScriptValue(QScriptEngine* engine, Sound* const& in) {
return engine->newQObject(in);
}
void soundPointerFromScriptValue(const QScriptValue &object, Sound* &out) {
out = qobject_cast<Sound*>(object.toQObject());
SoundScriptingInterface::SoundScriptingInterface(SharedSoundPointer sound) : _sound(sound) {
QObject::connect(sound.data(), &Sound::ready, this, &SoundScriptingInterface::ready);
}
Sound::Sound(const QUrl& url, bool isStereo) :

View file

@ -20,18 +20,16 @@
class Sound : public Resource {
Q_OBJECT
Q_PROPERTY(bool downloaded READ isReady)
Q_PROPERTY(float duration READ getDuration)
public:
Sound(const QUrl& url, bool isStereo = false);
bool isStereo() const { return _isStereo; }
bool isReady() const { return _isReady; }
float getDuration() { return _duration; }
float getDuration() const { return _duration; }
const QByteArray& getByteArray() { return _byteArray; }
const QByteArray& getByteArray() const { return _byteArray; }
signals:
void ready();
@ -50,13 +48,28 @@ private:
typedef QSharedPointer<Sound> SharedSoundPointer;
class SoundScriptingInterface : public QObject {
Q_OBJECT
Q_PROPERTY(bool downloaded READ isReady)
Q_PROPERTY(float duration READ getDuration)
public:
SoundScriptingInterface(SharedSoundPointer sound);
SharedSoundPointer getSound() { return _sound; }
bool isReady() const { return _sound->isReady(); }
float getDuration() { return _sound->getDuration(); }
signals:
void ready();
private:
SharedSoundPointer _sound;
};
Q_DECLARE_METATYPE(SharedSoundPointer)
QScriptValue soundSharedPointerToScriptValue(QScriptEngine* engine, SharedSoundPointer const& in);
void soundSharedPointerFromScriptValue(const QScriptValue& object, SharedSoundPointer &out);
Q_DECLARE_METATYPE(Sound*)
QScriptValue soundPointerToScriptValue(QScriptEngine* engine, Sound* const& in);
void soundPointerFromScriptValue(const QScriptValue& object, Sound* &out);
QScriptValue soundSharedPointerToScriptValue(QScriptEngine* engine, const SharedSoundPointer& in);
void soundSharedPointerFromScriptValue(const QScriptValue& object, SharedSoundPointer& out);
#endif // hifi_Sound_h
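
Taken together, the Sound.cpp and Sound.h hunks replace raw Sound* handles in scripts with a QObject wrapper that the script engine owns, while the wrapper keeps the underlying resource alive through a QSharedPointer. A hedged sketch of that wrapper-plus-conversion pattern, with a hypothetical Thing resource standing in for Sound:

```
#include <QObject>
#include <QSharedPointer>
#include <QtScript/QScriptEngine>

// Hypothetical resource type standing in for Sound.
class Thing : public QObject {
    Q_OBJECT
public:
    bool isReady() const { return true; }
signals:
    void ready();
};
using SharedThingPointer = QSharedPointer<Thing>;

// Script-facing wrapper: the engine deletes the wrapper (ScriptOwnership),
// and the wrapper's shared pointer keeps the resource alive meanwhile.
class ThingScriptingInterface : public QObject {
    Q_OBJECT
public:
    ThingScriptingInterface(SharedThingPointer thing) : _thing(thing) {
        QObject::connect(thing.data(), &Thing::ready, this, &ThingScriptingInterface::ready);
    }
    SharedThingPointer getThing() { return _thing; }
signals:
    void ready();
private:
    SharedThingPointer _thing;
};

QScriptValue thingToScriptValue(QScriptEngine* engine, const SharedThingPointer& in) {
    return engine->newQObject(new ThingScriptingInterface(in), QScriptEngine::ScriptOwnership);
}

void thingFromScriptValue(const QScriptValue& object, SharedThingPointer& out) {
    if (auto iface = qobject_cast<ThingScriptingInterface*>(object.toQObject())) {
        out = iface->getThing();
    }
}
```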

View file

@ -33,8 +33,10 @@ static QVariantMap createDeviceMap(const controller::InputDevice::Pointer device
for (const auto& inputMapping : userInputMapper->getAvailableInputs(device->getDeviceID())) {
const auto& input = inputMapping.first;
const auto inputName = QString(inputMapping.second).remove(SANITIZE_NAME_EXPRESSION);
#ifdef DEBUG
qCDebug(controllers) << "\tInput " << input.getChannel() << (int)input.getType()
<< QString::number(input.getID(), 16) << ": " << inputName;
#endif
deviceMap.insert(inputName, input.getID());
}
return deviceMap;

View file

@ -19,32 +19,39 @@
namespace controller {
static QStringList stateVariables;
void StateController::setStateVariables(const QStringList& newStateVariables) {
stateVariables = newStateVariables;
}
StateController::StateController() : InputDevice("Application") {
_deviceID = UserInputMapper::STATE_DEVICE;
for (const auto& variable : stateVariables) {
_namedReadLambdas[variable] = []()->float{ return 0; };
}
}
StateController::~StateController() {
}
void StateController::update(float deltaTime, const InputCalibrationData& inputCalibrationData, bool jointsCaptured) {}
void StateController::focusOutEvent() {}
void StateController::addInputVariant(QString name, ReadLambda lambda) {
_namedReadLambdas.push_back(NamedReadLambda(name, lambda));
void StateController::setInputVariant(const QString& name, ReadLambda lambda) {
// All state variables must be predeclared;
Q_ASSERT(_namedReadLambdas.contains(name));
_namedReadLambdas[name] = lambda;
}
Input::NamedVector StateController::getAvailableInputs() const {
Input::NamedVector availableInputs;
int i = 0;
for (auto& pair : _namedReadLambdas) {
availableInputs.push_back(Input::NamedPair(Input(_deviceID, i, ChannelType::BUTTON), pair.first));
for (const auto& name : stateVariables) {
availableInputs.push_back(Input::NamedPair(Input(_deviceID, i, ChannelType::BUTTON), name));
i++;
}
return availableInputs;
}
EndpointPointer StateController::createEndpoint(const Input& input) const {
return std::make_shared<LambdaEndpoint>(_namedReadLambdas[input.getChannel()].second);
auto name = stateVariables[input.getChannel()];
ReadLambda& readLambda = const_cast<QHash<QString, ReadLambda>&>(_namedReadLambdas)[name];
return std::make_shared<LambdaRefEndpoint>(readLambda);
}
}
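
The rewritten StateController requires every state variable to be declared up front via setStateVariables(), keeps the read lambdas in a QHash keyed by name, and exposes them to the mapper by the name's index in the predeclared list. A rough sketch of that lookup flow (only QHash/QStringList and the lambda signature are taken from the diff; the surrounding function and variable names are illustrative):

```
#include <QtGlobal>
#include <QHash>
#include <QStringList>
#include <functional>

using ReadLambda = std::function<float()>;

void stateControllerSketch() {
    QStringList stateVariables { "InHMD", "SnapTurn" };      // illustrative names
    QHash<QString, ReadLambda> namedReadLambdas;

    // 1. Predeclare every variable with a default reader returning 0.
    for (const auto& name : stateVariables) {
        namedReadLambdas[name] = []() -> float { return 0.0f; };
    }

    // 2. Later, a subsystem installs the real reader; the key must already exist,
    //    which is what the Q_ASSERT in setInputVariant() enforces.
    Q_ASSERT(namedReadLambdas.contains("InHMD"));
    namedReadLambdas["InHMD"] = []() -> float { return 1.0f; };

    // 3. Endpoints are created per channel (the name's index in stateVariables)
    //    and keep a reference to the stored lambda, so step 2 is visible to them.
    int channel = 0;
    ReadLambda& readLambda = namedReadLambdas[stateVariables[channel]];
    float value = readLambda();                               // now 1.0f
    Q_UNUSED(value);
}
```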

View file

@ -24,26 +24,28 @@ class StateController : public QObject, public InputDevice {
Q_PROPERTY(QString name READ getName)
public:
using Pointer = std::shared_ptr<StateController>;
using ReadLambda = std::function<float()>;
using NamedReadLambda = QPair<QString, ReadLambda>;
static void setStateVariables(const QStringList& stateVariables);
StateController();
const QString& getName() const { return _name; }
// Device functions
virtual Input::NamedVector getAvailableInputs() const override;
virtual void update(float deltaTime, const InputCalibrationData& inputCalibrationData, bool jointsCaptured) override;
virtual void focusOutEvent() override;
StateController();
virtual ~StateController();
void update(float deltaTime, const InputCalibrationData& inputCalibrationData, bool jointsCaptured) override {}
void focusOutEvent() override {}
using ReadLambda = std::function<float()>;
using NamedReadLambda = QPair<QString, ReadLambda>;
void addInputVariant(QString name, ReadLambda lambda);
virtual EndpointPointer createEndpoint(const Input& input) const override;
void setInputVariant(const QString& name, ReadLambda lambda);
EndpointPointer createEndpoint(const Input& input) const override;
protected:
QVector<NamedReadLambda> _namedReadLambdas;
QHash<QString, ReadLambda> _namedReadLambdas;
};
}

View file

@ -44,13 +44,15 @@
namespace controller {
const uint16_t UserInputMapper::ACTIONS_DEVICE = Input::INVALID_DEVICE - 0xFF;
const uint16_t UserInputMapper::STANDARD_DEVICE = 0;
const uint16_t UserInputMapper::ACTIONS_DEVICE = Input::INVALID_DEVICE - 0x00FF;
const uint16_t UserInputMapper::STATE_DEVICE = Input::INVALID_DEVICE - 0x0100;
}
// Default constructor allocates the output size with the current hardcoded action channels
controller::UserInputMapper::UserInputMapper() {
registerDevice(std::make_shared<ActionsDevice>());
registerDevice(_stateDevice = std::make_shared<StateController>());
registerDevice(std::make_shared<StandardController>());
}
@ -138,7 +140,6 @@ void UserInputMapper::loadDefaultMapping(uint16 deviceID) {
return;
}
auto mapping = loadMappings(proxyEntry->second->getDefaultMappingConfigs());
if (mapping) {
auto prevMapping = _mappingsByDevice[deviceID];
@ -235,6 +236,10 @@ void fixBisectedAxis(float& full, float& negative, float& positive) {
void UserInputMapper::update(float deltaTime) {
Locker locker(_lock);
static uint64_t updateCount = 0;
++updateCount;
// Reset the axis state for next loop
for (auto& channel : _actionStates) {
channel = 0.0f;
@ -694,11 +699,17 @@ Pose UserInputMapper::getPose(const Input& input) const {
return getPose(endpoint);
}
Mapping::Pointer UserInputMapper::loadMapping(const QString& jsonFile) {
Mapping::Pointer UserInputMapper::loadMapping(const QString& jsonFile, bool enable) {
Locker locker(_lock);
if (jsonFile.isEmpty()) {
return Mapping::Pointer();
}
// Each mapping only needs to be loaded once
static QSet<QString> loaded;
if (loaded.contains(jsonFile)) {
return Mapping::Pointer();
}
loaded.insert(jsonFile);
QString json;
{
QFile file(jsonFile);
@ -707,7 +718,11 @@ Mapping::Pointer UserInputMapper::loadMapping(const QString& jsonFile) {
}
file.close();
}
return parseMapping(json);
auto result = parseMapping(json);
if (enable) {
enableMapping(result->name);
}
return result;
}
MappingPointer UserInputMapper::loadMappings(const QStringList& jsonFiles) {
@ -961,7 +976,7 @@ Route::Pointer UserInputMapper::parseRoute(const QJsonValue& value) {
result->json = QString(QJsonDocument(obj).toJson());
result->source = parseSource(obj[JSON_CHANNEL_FROM]);
result->debug = obj[JSON_CHANNEL_DEBUG].toBool();
result->debug = obj[JSON_CHANNEL_PEEK].toBool();
result->peek = obj[JSON_CHANNEL_PEEK].toBool();
if (!result->source) {
qWarning() << "Invalid route source " << obj[JSON_CHANNEL_FROM];
return Route::Pointer();
@ -1033,7 +1048,7 @@ Mapping::Pointer UserInputMapper::parseMapping(const QJsonValue& json) {
Route::Pointer route = parseRoute(channelIt);
if (!route) {
qWarning() << "Couldn't parse route";
qWarning() << "Couldn't parse route:" << mapping->name << QString(QJsonDocument(channelIt.toObject()).toJson());
continue;
}
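
loadMapping() now guards against reloading: a function-local QSet remembers every JSON file it has already parsed, and the new enable flag lets callers activate the mapping in the same step. A small sketch of the guard, with a hypothetical loadAndMaybeEnable() standing in for the real method (which additionally holds the mapper lock):

```
#include <QSet>
#include <QString>

// Hypothetical free function illustrating the once-only guard in loadMapping().
bool loadAndMaybeEnable(const QString& jsonFile, bool enable) {
    if (jsonFile.isEmpty()) {
        return false;
    }
    // Function-local static survives across calls, so a file seen before is skipped.
    static QSet<QString> loaded;
    if (loaded.contains(jsonFile)) {
        return false;
    }
    loaded.insert(jsonFile);

    // ... read the file and parse the mapping here ...

    if (enable) {
        // In the diff this is enableMapping(result->name).
    }
    return true;
}
```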

View file

@ -30,6 +30,7 @@
#include "DeviceProxy.h"
#include "StandardControls.h"
#include "Actions.h"
#include "StateController.h"
namespace controller {
@ -55,8 +56,9 @@ namespace controller {
using uint16 = uint16_t;
using uint32 = uint32_t;
static const uint16_t ACTIONS_DEVICE;
static const uint16_t STANDARD_DEVICE;
static const uint16_t ACTIONS_DEVICE;
static const uint16_t STATE_DEVICE;
UserInputMapper();
virtual ~UserInputMapper();
@ -100,10 +102,11 @@ namespace controller {
const DevicesMap& getDevices() { return _registeredDevices; }
uint16 getStandardDeviceID() const { return STANDARD_DEVICE; }
InputDevice::Pointer getStandardDevice() { return _registeredDevices[getStandardDeviceID()]; }
StateController::Pointer getStateDevice() { return _stateDevice; }
MappingPointer newMapping(const QString& mappingName);
MappingPointer parseMapping(const QString& json);
MappingPointer loadMapping(const QString& jsonFile);
MappingPointer loadMapping(const QString& jsonFile, bool enable = false);
MappingPointer loadMappings(const QStringList& jsonFiles);
void loadDefaultMapping(uint16 deviceID);
@ -120,6 +123,7 @@ namespace controller {
// GetFreeDeviceID should be called before registering a device to use an ID not used by a different device.
uint16 getFreeDeviceID() { return _nextFreeDeviceID++; }
DevicesMap _registeredDevices;
StateController::Pointer _stateDevice;
uint16 _nextFreeDeviceID = STANDARD_DEVICE + 1;
std::vector<float> _actionStates = std::vector<float>(toInt(Action::NUM_ACTIONS), 0.0f);

View file

@ -12,5 +12,8 @@
// warning LNK4221: This object file does not define any previously undefined public symbols,
// so it will not be used by any link operation that consumes this library
//
//#include "Endpoint.h"
#include "Endpoint.h"
namespace controller {
Endpoint::WriteLambda DEFAULT_WRITE_LAMBDA = [](float) {};
}

View file

@ -67,6 +67,23 @@ namespace controller {
WriteLambda _writeLambda;
};
extern Endpoint::WriteLambda DEFAULT_WRITE_LAMBDA;
class LambdaRefEndpoint : public Endpoint {
public:
using Endpoint::apply;
LambdaRefEndpoint(const ReadLambda& readLambda, const WriteLambda& writeLambda = DEFAULT_WRITE_LAMBDA)
: Endpoint(Input::INVALID_INPUT), _readLambda(readLambda), _writeLambda(writeLambda) {
}
virtual float peek() const override { return _readLambda(); }
virtual void apply(float value, const Pointer& source) override { _writeLambda(value); }
private:
const ReadLambda& _readLambda;
const WriteLambda& _writeLambda;
};
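
Unlike LambdaEndpoint, which copies its lambdas, LambdaRefEndpoint stores references, so the owner of the std::function objects (here the StateController's QHash) can retarget them after the endpoint exists; the trade-off is that the referenced objects must outlive the endpoint. A tiny self-contained illustration of that behaviour (RefReader is a stand-in, not part of this library):

```
#include <functional>
#include <iostream>

using ReadLambda = std::function<float()>;

// Stand-in for an endpoint that holds a reference to its read lambda.
struct RefReader {
    const ReadLambda& source;               // reference, not a copy
    float peek() const { return source(); }
};

int main() {
    ReadLambda reader = []() { return 0.0f; };
    RefReader endpoint { reader };          // endpoint tracks later changes to 'reader'

    std::cout << endpoint.peek() << "\n";   // prints 0
    reader = []() { return 42.0f; };        // retarget the owning std::function
    std::cout << endpoint.peek() << "\n";   // prints 42 -- no endpoint rebuild needed
    return 0;
}
```
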
class VirtualEndpoint : public Endpoint {
public:

View file

@ -467,8 +467,6 @@ void CompositorHelper::toggle() {
}
}
glm::mat4 CompositorHelper::getReticleTransform(const glm::mat4& eyePose, const glm::vec3& headPosition) const {
glm::mat4 result;
if (isHMD()) {
@ -487,7 +485,7 @@ glm::mat4 CompositorHelper::getReticleTransform(const glm::mat4& eyePose, const
pointerTransform[3] = vec4(cursorRay + headPosition, 1);
// Scale up the cursor because of distance
reticleScale *= reticleDepth;
}
}
glm::mat4 overlayXfm;
_modelTransform.getMatrix(overlayXfm);
pointerTransform = overlayXfm * pointerTransform;
@ -503,7 +501,7 @@ glm::mat4 CompositorHelper::getReticleTransform(const glm::mat4& eyePose, const
mousePosition.y *= -1.0f;
vec2 mouseSize = CURSOR_PIXEL_SIZE / canvasSize;
return glm::scale(glm::translate(glm::mat4(), vec3(mousePosition, 0.0f)), vec3(mouseSize, 1.0f));
result = glm::scale(glm::translate(glm::mat4(), vec3(mousePosition, 0.0f)), vec3(mouseSize, 1.0f));
}
return result;
}

View file

@ -176,7 +176,7 @@ private:
bool _reticleOverQml { false };
bool _allowMouseCapture { true };
std::atomic<bool> _allowMouseCapture { true };
bool _fakeMouseEvent { false };
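
_allowMouseCapture turns into std::atomic<bool> because the flag is written and read from different threads (UI versus the render/present path is the likely pairing); a plain bool accessed that way is a data race. A minimal illustration of the pattern:

```
#include <atomic>

struct CompositorFlags {
    std::atomic<bool> allowMouseCapture { true };   // shared between threads
};

// Writer thread:   flags.allowMouseCapture = false;          // atomic store
// Reader thread:   if (flags.allowMouseCapture.load()) { }   // atomic load, never torn
```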

View file

@ -245,8 +245,10 @@ bool OpenGLDisplayPlugin::activate() {
#if THREADED_PRESENT
// Start the present thread if necessary
auto presentThread = DependencyManager::get<PresentThread>();
if (!presentThread) {
QSharedPointer<PresentThread> presentThread;
if (DependencyManager::isSet<PresentThread>()) {
presentThread = DependencyManager::get<PresentThread>();
} else {
auto widget = _container->getPrimaryWidget();
DependencyManager::set<PresentThread>();
presentThread = DependencyManager::get<PresentThread>();

View file

@ -71,8 +71,8 @@ EntityTreeRenderer::EntityTreeRenderer(bool wantScripts, AbstractViewStateInterf
}
EntityTreeRenderer::~EntityTreeRenderer() {
// NOTE: we don't need to delete _entitiesScriptEngine because it is registered with the application and has a
// signal tied to call it's deleteLater on doneRunning
// NOTE: We don't need to delete _entitiesScriptEngine because
// it is registered with ScriptEngines, which will call deleteLater for us.
}
void EntityTreeRenderer::clear() {
@ -297,7 +297,14 @@ void EntityTreeRenderer::applyZonePropertiesToScene(std::shared_ptr<ZoneEntityIt
auto sceneLocation = sceneStage->getLocation();
auto sceneTime = sceneStage->getTime();
// Skybox and procedural skybox data
auto skybox = std::dynamic_pointer_cast<ProceduralSkybox>(skyStage->getSkybox());
static QString userData;
if (!zone) {
userData = QString();
skybox->clear();
_pendingSkyboxTexture = false;
_skyboxTexture.clear();
@ -373,9 +380,7 @@ void EntityTreeRenderer::applyZonePropertiesToScene(std::shared_ptr<ZoneEntityIt
switch (zone->getBackgroundMode()) {
case BACKGROUND_MODE_SKYBOX: {
auto skybox = std::dynamic_pointer_cast<ProceduralSkybox>(skyStage->getSkybox());
skybox->setColor(zone->getSkyboxProperties().getColorVec3());
static QString userData;
if (userData != zone->getUserData()) {
userData = zone->getUserData();
skybox->parse(userData);
@ -414,9 +419,15 @@ void EntityTreeRenderer::applyZonePropertiesToScene(std::shared_ptr<ZoneEntityIt
case BACKGROUND_MODE_INHERIT:
default:
skyStage->setBackgroundMode(model::SunSkyStage::SKY_DOME); // let the application background through
_pendingSkyboxTexture = false;
// Clear the skybox to release its textures
userData = QString();
skybox->clear();
_skyboxTexture.clear();
_pendingSkyboxTexture = false;
// Let the application background through
skyStage->setBackgroundMode(model::SunSkyStage::SKY_DOME);
break;
}

View file

@ -92,7 +92,7 @@ private:
public: \
virtual bool addToScene(EntityItemPointer self, std::shared_ptr<render::Scene> scene, render::PendingChanges& pendingChanges) override { return _renderHelper.addToScene(self, scene, pendingChanges); } \
virtual void removeFromScene(EntityItemPointer self, std::shared_ptr<render::Scene> scene, render::PendingChanges& pendingChanges) override { _renderHelper.removeFromScene(self, scene, pendingChanges); } \
virtual void locationChanged() override { EntityItem::locationChanged(); _renderHelper.notifyChanged(); } \
virtual void locationChanged(bool tellPhysics = true) override { EntityItem::locationChanged(tellPhysics); _renderHelper.notifyChanged(); } \
virtual void dimensionsChanged() override { EntityItem::dimensionsChanged(); _renderHelper.notifyChanged(); } \
private: \
SimpleRenderableEntityItem _renderHelper;

View file

@ -271,10 +271,10 @@ bool RenderableModelEntityItem::getAnimationFrame() {
return false;
}
if (!hasAnimation() || !_jointMappingCompleted) {
if (!hasRenderAnimation() || !_jointMappingCompleted) {
return false;
}
AnimationPointer myAnimation = getAnimation(_animationProperties.getURL()); // FIXME: this could be optimized
AnimationPointer myAnimation = getAnimation(getRenderAnimationURL()); // FIXME: this could be optimized
if (myAnimation && myAnimation->isLoaded()) {
const QVector<FBXAnimationFrame>& frames = myAnimation->getFramesReference(); // NOTE: getFrames() is too heavy
@ -384,7 +384,7 @@ void RenderableModelEntityItem::render(RenderArgs* args) {
}
if (_model) {
if (hasAnimation()) {
if (hasRenderAnimation()) {
if (!jointsMapped()) {
QStringList modelJointNames = _model->getJointNames();
mapJoints(modelJointNames);
@ -434,6 +434,8 @@ void RenderableModelEntityItem::render(RenderArgs* args) {
_showCollisionHull = shouldShowCollisionHull;
render::PendingChanges pendingChanges;
_model->removeFromScene(scene, pendingChanges);
render::Item::Status::Getters statusGetters;
makeEntityItemStatusGetters(getThisPointer(), statusGetters);
_model->addToScene(scene, pendingChanges, statusGetters, _showCollisionHull);
@ -526,6 +528,9 @@ void RenderableModelEntityItem::update(const quint64& now) {
}
}
// make a copy of the animation properties
_renderAnimationProperties = _animationProperties;
ModelEntityItem::update(now);
}
@ -793,8 +798,8 @@ void RenderableModelEntityItem::setJointTranslationsSet(const QVector<bool>& tra
}
void RenderableModelEntityItem::locationChanged() {
EntityItem::locationChanged();
void RenderableModelEntityItem::locationChanged(bool tellPhysics) {
EntityItem::locationChanged(tellPhysics);
if (_model && _model->isActive()) {
_model->setRotation(getRotation());
_model->setTranslation(getPosition());

View file

@ -75,13 +75,18 @@ public:
virtual void setJointTranslationsSet(const QVector<bool>& translationsSet) override;
virtual void loader() override;
virtual void locationChanged() override;
virtual void locationChanged(bool tellPhysics = true) override;
virtual void resizeJointArrays(int newSize = -1) override;
virtual int getJointIndex(const QString& name) const override;
virtual QStringList getJointNames() const override;
// These operate on a copy of the renderAnimationProperties, so they can be accessed
// without having the entityTree lock.
bool hasRenderAnimation() const { return !_renderAnimationProperties.getURL().isEmpty(); }
const QString& getRenderAnimationURL() const { return _renderAnimationProperties.getURL(); }
private:
QVariantMap parseTexturesToMap(QString textures);
void remapTextures();
@ -97,6 +102,8 @@ private:
QVector<QVector<glm::vec3>> _points;
bool _dimensionsInitialized = true;
AnimationPropertyGroup _renderAnimationProperties;
render::ItemID _myMetaItem{ render::Item::INVALID_ITEM_ID };
bool _showCollisionHull = false;

View file

@ -39,6 +39,7 @@ public:
InterpolationData<float> radius;
InterpolationData<glm::vec4> color; // rgba
float lifespan;
glm::vec3 spare;
};
struct ParticlePrimitive {

View file

@ -29,7 +29,7 @@ public:
virtual void removeFromScene(EntityItemPointer self, render::ScenePointer scene, render::PendingChanges& pendingChanges) override;
protected:
virtual void locationChanged() override { EntityItem::locationChanged(); notifyBoundChanged(); }
virtual void locationChanged(bool tellPhysics = true) override { EntityItem::locationChanged(tellPhysics); notifyBoundChanged(); }
virtual void dimensionsChanged() override { EntityItem::dimensionsChanged(); notifyBoundChanged(); }
void notifyBoundChanged();

View file

@ -42,7 +42,7 @@ public:
virtual void removeFromScene(EntityItemPointer self, std::shared_ptr<render::Scene> scene, render::PendingChanges& pendingChanges);
private:
virtual void locationChanged() override { EntityItem::locationChanged(); notifyBoundChanged(); }
virtual void locationChanged(bool tellPhysics = true) override { EntityItem::locationChanged(tellPhysics); notifyBoundChanged(); }
virtual void dimensionsChanged() override { EntityItem::dimensionsChanged(); notifyBoundChanged(); }
void notifyBoundChanged();

View file

@ -30,10 +30,10 @@ struct Colors {
struct ParticleUniforms {
Radii radius;
Colors color;
float lifespan;
vec4 lifespan; // x is lifespan, 3 spare floats
};
uniform particleBuffer {
layout(std140) uniform particleBuffer {
ParticleUniforms particle;
};
@ -112,7 +112,7 @@ void main(void) {
int twoTriID = gl_VertexID - particleID * NUM_VERTICES_PER_PARTICLE;
// Particle properties
float age = inColor.x / particle.lifespan;
float age = inColor.x / particle.lifespan.x;
float seed = inColor.y;
// Pass the texcoord and the z texcoord is representing the texture icon

View file

@ -888,6 +888,9 @@ void EntityItem::simulateKinematicMotion(float timeElapsed, bool setFlags) {
if (hasActions()) {
return;
}
if (!_parentID.isNull()) {
return;
}
if (hasLocalAngularVelocity()) {
glm::vec3 localAngularVelocity = getLocalAngularVelocity();
@ -1973,9 +1976,16 @@ QList<EntityActionPointer> EntityItem::getActionsOfType(EntityActionType typeToG
return result;
}
void EntityItem::locationChanged() {
void EntityItem::locationChanged(bool tellPhysics) {
requiresRecalcBoxes();
SpatiallyNestable::locationChanged(); // tell all the children, also
if (tellPhysics) {
_dirtyFlags |= Simulation::DIRTY_TRANSFORM;
}
EntityTreePointer tree = getTree();
if (tree) {
tree->entityChanged(getThisPointer());
}
SpatiallyNestable::locationChanged(tellPhysics); // tell all the children, also
}
void EntityItem::dimensionsChanged() {
@ -1984,6 +1994,7 @@ void EntityItem::dimensionsChanged() {
}
void EntityItem::globalizeProperties(EntityItemProperties& properties, const QString& messageTemplate, const glm::vec3& offset) const {
// TODO -- combine this with convertLocationToScriptSemantics
bool success;
auto globalPosition = getPosition(success);
if (success) {

View file

@ -432,7 +432,7 @@ protected:
const QByteArray getActionDataInternal() const;
void setActionDataInternal(QByteArray actionData);
virtual void locationChanged() override;
virtual void locationChanged(bool tellPhysics = true) override;
virtual void dimensionsChanged() override;
EntityTypes::EntityType _type;

View file

@ -475,13 +475,20 @@ QVector<QUuid> EntityScriptingInterface::findEntitiesInBox(const glm::vec3& corn
return result;
}
RayToEntityIntersectionResult EntityScriptingInterface::findRayIntersection(const PickRay& ray, bool precisionPicking, const QScriptValue& entityIdsToInclude, const QScriptValue& entityIdsToDiscard) {
RayToEntityIntersectionResult EntityScriptingInterface::findRayIntersection(const PickRay& ray, bool precisionPicking,
const QScriptValue& entityIdsToInclude, const QScriptValue& entityIdsToDiscard) {
QVector<EntityItemID> entitiesToInclude = qVectorEntityItemIDFromScriptValue(entityIdsToInclude);
QVector<EntityItemID> entitiesToDiscard = qVectorEntityItemIDFromScriptValue(entityIdsToDiscard);
return findRayIntersectionWorker(ray, Octree::TryLock, precisionPicking, entitiesToInclude, entitiesToDiscard);
return findRayIntersectionWorker(ray, Octree::Lock, precisionPicking, entitiesToInclude, entitiesToDiscard);
}
RayToEntityIntersectionResult EntityScriptingInterface::findRayIntersectionBlocking(const PickRay& ray, bool precisionPicking, const QScriptValue& entityIdsToInclude, const QScriptValue& entityIdsToDiscard) {
// FIXME - we should remove this API and encourage all users to use findRayIntersection() instead. We've changed
// findRayIntersection() to be blocking because it never makes sense for a script to get back a non-answer
RayToEntityIntersectionResult EntityScriptingInterface::findRayIntersectionBlocking(const PickRay& ray, bool precisionPicking,
const QScriptValue& entityIdsToInclude, const QScriptValue& entityIdsToDiscard) {
qWarning() << "Entities.findRayIntersectionBlocking() is obsolete, use Entities.findRayIntersection() instead.";
const QVector<EntityItemID>& entitiesToInclude = qVectorEntityItemIDFromScriptValue(entityIdsToInclude);
const QVector<EntityItemID> entitiesToDiscard = qVectorEntityItemIDFromScriptValue(entityIdsToDiscard);
return findRayIntersectionWorker(ray, Octree::Lock, precisionPicking, entitiesToInclude, entitiesToDiscard);

View file

@ -1323,7 +1323,9 @@ QVector<EntityItemID> EntityTree::sendEntities(EntityEditPacketSender* packetSen
// We need to keep a map so that we can map parent identifiers correctly.
QHash<EntityItemID, EntityItemID> map;
args.map = &map;
recurseTreeWithOperation(sendEntitiesOperation, &args);
withReadLock([&] {
recurseTreeWithOperation(sendEntitiesOperation, &args);
});
packetSender->releaseQueuedMessages();
return map.values().toVector();
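
Wrapping the tree walk in withReadLock() means the recursion runs while the tree's read lock is held and the lock is released automatically when the lambda returns. The sketch below is a generic standard-library illustration of what such a helper typically looks like, not the actual hifi implementation:

```
#include <shared_mutex>
#include <utility>

// Generic withReadLock()/withWriteLock() helper: the lambda runs with the lock
// held, and the lock is released when the lambda returns or throws.
class Lockable {
public:
    template <typename F>
    auto withReadLock(F&& f) const {
        std::shared_lock<std::shared_mutex> lock(_mutex);
        return std::forward<F>(f)();
    }

    template <typename F>
    auto withWriteLock(F&& f) {
        std::unique_lock<std::shared_mutex> lock(_mutex);
        return std::forward<F>(f)();
    }

private:
    mutable std::shared_mutex _mutex;
};

// Usage mirrors the sendEntities() change above:
//   tree.withReadLock([&] {
//       recurseTreeWithOperation(sendEntitiesOperation, &args);
//   });
```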

View file

@ -9,17 +9,20 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <iostream>
#include <QBuffer>
#include <QDataStream>
#include <QIODevice>
#include <QStringList>
#include <QTextStream>
#include <QtDebug>
#include <QtEndian>
#include <QFileInfo>
#include "FBXReader.h"
#include <iostream>
#include <QtCore/QBuffer>
#include <QtCore/QDataStream>
#include <QtCore/QIODevice>
#include <QtCore/QStringList>
#include <QtCore/QTextStream>
#include <QtCore/QDebug>
#include <QtCore/QtEndian>
#include <QtCore/QFileInfo>
#include <shared/NsightHelpers.h>
template<class T> int streamSize() {
return sizeof(T);
}
@ -299,6 +302,7 @@ FBXNode parseTextFBXNode(Tokenizer& tokenizer) {
}
FBXNode FBXReader::parseFBX(QIODevice* device) {
PROFILE_RANGE_EX(__FUNCTION__, 0xff0000ff, device);
// verify the prolog
const QByteArray BINARY_PROLOG = "Kaydara FBX Binary ";
if (device->peek(BINARY_PROLOG.size()) != BINARY_PROLOG) {

View file

@ -12,17 +12,20 @@
// http://www.scratchapixel.com/old/lessons/3d-advanced-lessons/obj-file-format/obj-file-format/
// http://paulbourke.net/dataformats/obj/
#include "OBJReader.h"
#include <QBuffer>
#include <QIODevice>
#include <QtNetwork/QNetworkAccessManager>
#include <QtNetwork/QNetworkRequest>
#include <QEventLoop>
#include <ctype.h> // .obj files are not locale-specific. The C/ASCII charset applies.
#include <QtCore/QBuffer>
#include <QtCore/QIODevice>
#include <QtCore/QEventLoop>
#include <QtNetwork/QNetworkAccessManager>
#include <QtNetwork/QNetworkRequest>
#include <shared/NsightHelpers.h>
#include <NetworkAccessManager.h>
#include "FBXReader.h"
#include "OBJReader.h"
#include "ModelFormatLogging.h"
QHash<QString, float> COMMENT_SCALE_HINTS = {{"This file uses centimeters as units", 1.0f / 100.0f},
@ -404,7 +407,7 @@ done:
FBXGeometry* OBJReader::readOBJ(QByteArray& model, const QVariantHash& mapping, const QUrl& url) {
PROFILE_RANGE_EX(__FUNCTION__, 0xffff0000, nullptr);
QBuffer buffer { &model };
buffer.open(QIODevice::ReadOnly);

View file

@ -12,7 +12,7 @@ const QSurfaceFormat& getDefaultOpenGLSurfaceFormat() {
// Qt Quick may need a depth and stencil buffer. Always make sure these are available.
format.setDepthBufferSize(DEFAULT_GL_DEPTH_BUFFER_BITS);
format.setStencilBufferSize(DEFAULT_GL_STENCIL_BUFFER_BITS);
format.setVersion(4, 1);
format.setVersion(4, 5);
#ifdef DEBUG
format.setOption(QSurfaceFormat::DebugContext);
#endif
@ -27,7 +27,7 @@ const QGLFormat& getDefaultGLFormat() {
static QGLFormat glFormat;
static std::once_flag once;
std::call_once(once, [] {
glFormat.setVersion(4, 1);
glFormat.setVersion(4, 5);
glFormat.setProfile(QGLFormat::CoreProfile); // Requires >=Qt-4.8.0
glFormat.setSampleBuffers(false);
glFormat.setDepth(false);

View file

@ -83,3 +83,8 @@ void OffscreenGLCanvas::doneCurrent() {
QObject* OffscreenGLCanvas::getContextObject() {
return _context;
}
void OffscreenGLCanvas::moveToThreadWithContext(QThread* thread) {
moveToThread(thread);
_context->moveToThread(thread);
}
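
moveToThreadWithContext() exists because a QOpenGLContext can only be made current on the thread that owns it: moving just the canvas to a worker thread would leave the context on the old thread, and later makeCurrent() calls would fail. A short usage sketch under that assumption (the surrounding thread setup is illustrative):

```
#include <QThread>
#include <QOpenGLContext>
#include "OffscreenGLCanvas.h"   // the class extended in this diff

void startOffscreenWork(QOpenGLContext* sharedContext) {   // sharedContext may be nullptr
    auto renderThread = new QThread();
    auto canvas = new OffscreenGLCanvas();
    canvas->create(sharedContext);

    // Hand both the canvas and its QOpenGLContext to the worker before it starts;
    // from then on makeCurrent()/doneCurrent() may only be called on that thread.
    canvas->moveToThreadWithContext(renderThread);
    renderThread->start();
}
```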

View file

@ -26,6 +26,7 @@ public:
bool create(QOpenGLContext* sharedContext = nullptr);
bool makeCurrent();
void doneCurrent();
void moveToThreadWithContext(QThread* thread);
QOpenGLContext* getContext() {
return _context;
}

View file

@ -6,8 +6,10 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "OglplusHelpers.h"
#include <QSharedPointer>
#include <set>
#include <oglplus/shapes/plane.hpp>
#include <oglplus/shapes/sky_box.hpp>
using namespace oglplus;
using namespace oglplus::shapes;
@ -20,11 +22,13 @@ uniform mat4 mvp = mat4(1);
in vec3 Position;
in vec2 TexCoord;
out vec3 vPosition;
out vec2 vTexCoord;
void main() {
gl_Position = mvp * vec4(Position, 1);
vTexCoord = TexCoord ;
vTexCoord = TexCoord;
vPosition = Position;
}
)VS";
@ -35,7 +39,9 @@ static const char * SIMPLE_TEXTURED_FS = R"FS(#version 410 core
uniform sampler2D sampler;
uniform float alpha = 1.0;
in vec3 vPosition;
in vec2 vTexCoord;
out vec4 FragColor;
void main() {
@ -47,12 +53,38 @@ void main() {
)FS";
static const char * SIMPLE_TEXTURED_CUBEMAP_FS = R"FS(#version 410 core
#pragma line __LINE__
uniform samplerCube sampler;
uniform float alpha = 1.0;
in vec3 vPosition;
in vec3 vTexCoord;
out vec4 FragColor;
void main() {
FragColor = texture(sampler, vPosition);
FragColor.a *= alpha;
}
)FS";
ProgramPtr loadDefaultShader() {
ProgramPtr result;
compileProgram(result, SIMPLE_TEXTURED_VS, SIMPLE_TEXTURED_FS);
return result;
}
ProgramPtr loadCubemapShader() {
ProgramPtr result;
compileProgram(result, SIMPLE_TEXTURED_VS, SIMPLE_TEXTURED_CUBEMAP_FS);
return result;
}
void compileProgram(ProgramPtr & result, const std::string& vs, const std::string& fs) {
using namespace oglplus;
try {
@ -93,6 +125,10 @@ ShapeWrapperPtr loadPlane(ProgramPtr program, float aspect) {
);
}
ShapeWrapperPtr loadSkybox(ProgramPtr program) {
return ShapeWrapperPtr(new shapes::ShapeWrapper({ { "Position" } }, shapes::SkyBox(), *program));
}
// Return a point's cartesian coordinates on a sphere from pitch and yaw
static glm::vec3 getPoint(float yaw, float pitch) {
return glm::vec3(glm::cos(-pitch) * (-glm::sin(yaw)),

View file

@ -37,7 +37,6 @@
#include <oglplus/bound/framebuffer.hpp>
#include <oglplus/bound/renderbuffer.hpp>
#include <oglplus/shapes/wrapper.hpp>
#include <oglplus/shapes/plane.hpp>
#ifdef _WIN32
#pragma warning(pop)
@ -55,7 +54,9 @@ using ProgramPtr = std::shared_ptr<oglplus::Program>;
using Mat4Uniform = oglplus::Uniform<mat4>;
ProgramPtr loadDefaultShader();
ProgramPtr loadCubemapShader();
void compileProgram(ProgramPtr & result, const std::string& vs, const std::string& fs);
ShapeWrapperPtr loadSkybox(ProgramPtr program);
ShapeWrapperPtr loadPlane(ProgramPtr program, float aspect = 1.0f);
ShapeWrapperPtr loadSphereSection(ProgramPtr program, float fov = PI / 3.0f * 2.0f, float aspect = 16.0f / 9.0f, int slices = 32, int stacks = 32);

View file

@ -13,6 +13,9 @@
#include <QOpenGLContext>
QOpenGLContext* QOpenGLContextWrapper::currentContext() {
return QOpenGLContext::currentContext();
}
QOpenGLContextWrapper::QOpenGLContextWrapper() :
_context(new QOpenGLContext)

View file

@ -19,7 +19,6 @@ class QSurfaceFormat;
class QOpenGLContextWrapper {
public:
QOpenGLContextWrapper();
void setFormat(const QSurfaceFormat& format);
bool create();
void swapBuffers(QSurface* surface);
@ -27,6 +26,8 @@ public:
void doneCurrent();
void setShareContext(QOpenGLContext* otherContext);
static QOpenGLContext* currentContext();
QOpenGLContext* getContext() {
return _context;
}

View file

@ -125,6 +125,7 @@ GLBackend::GLBackend() {
glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &_uboAlignment);
initInput();
initTransform();
initTextureTransferHelper();
}
GLBackend::~GLBackend() {

View file

@ -24,6 +24,8 @@
namespace gpu {
class GLTextureTransferHelper;
class GLBackend : public Backend {
// Context Backend static interface required
@ -35,7 +37,6 @@ class GLBackend : public Backend {
explicit GLBackend(bool syncCache);
GLBackend();
public:
virtual ~GLBackend();
virtual void render(Batch& batch);
@ -75,25 +76,63 @@ public:
class GLTexture : public GPUObject {
public:
Stamp _storageStamp;
Stamp _contentStamp;
GLuint _texture;
GLenum _target;
const Stamp _storageStamp;
Stamp _contentStamp { 0 };
const GLuint _texture;
const GLenum _target;
GLTexture();
GLTexture(const gpu::Texture& gpuTexture);
~GLTexture();
void setSize(GLuint size);
GLuint size() const { return _size; }
enum SyncState {
// The texture is currently undergoing no processing, although its content
// may be out of date, or its storage may be invalid relative to the
// owning GPU texture
Idle,
// The texture has been queued for transfer to the GPU
Pending,
// The texture has been transferred to the GPU, but is awaiting
// any post transfer operations that may need to occur on the
// primary rendering thread
Transferred,
};
void setSyncState(SyncState syncState) { _syncState = syncState; }
SyncState getSyncState() const { return _syncState; }
// Is the storage out of date relative to the gpu texture?
bool isInvalid() const;
// Is the content out of date relative to the gpu texture?
bool isOutdated() const;
// Is the texture in a state where it can be rendered with no work?
bool isReady() const;
// Move the image bits from the CPU to the GPU
void transfer() const;
// Execute any post-move operations that must occur only on the main thread
void postTransfer();
static const size_t CUBE_NUM_FACES = 6;
static const GLenum CUBE_FACE_LAYOUT[6];
private:
GLuint _size;
void transferMip(GLenum target, const Texture::PixelsPointer& mip) const;
const GLuint _size;
// The owning texture
const Texture& _gpuTexture;
std::atomic<SyncState> _syncState { SyncState::Idle };
};
static GLTexture* syncGPUObject(const Texture& texture);
static GLTexture* syncGPUObject(const TexturePointer& texture);
static GLuint getTextureID(const TexturePointer& texture, bool sync = true);
// very specific for now
static void syncSampler(const Sampler& sampler, Texture::Type type, GLTexture* object);
static void syncSampler(const Sampler& sampler, Texture::Type type, const GLTexture* object);
class GLShader : public GPUObject {
public:
@ -241,6 +280,11 @@ protected:
void renderPassTransfer(Batch& batch);
void renderPassDraw(Batch& batch);
void initTextureTransferHelper();
static void transferGPUObject(const TexturePointer& texture);
static std::shared_ptr<GLTextureTransferHelper> _textureTransferHelper;
// Draw Stage
void do_draw(Batch& batch, size_t paramOffset);
void do_drawIndexed(Batch& batch, size_t paramOffset);
@ -484,6 +528,7 @@ protected:
typedef void (GLBackend::*CommandCall)(Batch&, size_t);
static CommandCall _commandCalls[Batch::NUM_COMMANDS];
};
};
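
The new GLTexture lifecycle is a small state machine: a texture starts Idle, is marked Pending when its outdated content is queued for the transfer thread, becomes Transferred once the upload is done, and goes back to Idle after postTransfer() runs on the rendering thread. A simplified sketch of the decisions syncGPUObject() makes against that state (names mirror the declarations above; the helper types are illustrative):

```
#include <atomic>

enum class SyncState { Idle, Pending, Transferred };

// Simplified mirror of GLTexture's bookkeeping.
struct TextureState {
    std::atomic<SyncState> syncState { SyncState::Idle };
    bool invalid  = false;   // storage stamp older than the owning gpu::Texture's
    bool outdated = false;   // content stamp older than the owning gpu::Texture's data

    // isReady(): can this texture be rendered with no further work?
    bool isReady() const {
        if (invalid) { return false; }                        // storage must be rebuilt first
        SyncState state = syncState.load();
        if (outdated) { return state == SyncState::Pending; } // transfer already queued
        return state == SyncState::Idle;                      // up to date, not mid-transfer
    }
};

// The sync step then becomes: rebuild if invalid, queue a transfer if outdated,
// and finish on the render thread once the transfer thread reports Transferred.
void sync(TextureState& t) {
    if (t.invalid)  { /* recreate storage, e.g. new GLTexture(texture) */ t.invalid = false; }
    if (t.outdated) { t.syncState = SyncState::Pending;  /* queue transferTexture() */ }
    if (t.syncState == SyncState::Transferred) { /* postTransfer() */ t.syncState = SyncState::Idle; }
}
```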

View file

@ -83,7 +83,7 @@ GLBackend::GLFramebuffer* GLBackend::syncGPUObject(const Framebuffer& framebuffe
for (auto& b : framebuffer.getRenderBuffers()) {
surface = b._texture;
if (surface) {
gltexture = GLBackend::syncGPUObject(*surface);
gltexture = GLBackend::syncGPUObject(surface);
} else {
gltexture = nullptr;
}
@ -123,7 +123,7 @@ GLBackend::GLFramebuffer* GLBackend::syncGPUObject(const Framebuffer& framebuffe
if (framebuffer.getDepthStamp() != object->_depthStamp) {
auto surface = framebuffer.getDepthStencilBuffer();
if (framebuffer.hasDepthStencil() && surface) {
gltexture = GLBackend::syncGPUObject(*surface);
gltexture = GLBackend::syncGPUObject(surface);
}
if (gltexture) {

View file

@ -255,7 +255,7 @@ void GLBackend::do_setResourceTexture(Batch& batch, size_t paramOffset) {
_stats._RSNumTextureBounded++;
// Always make sure the GLObject is in sync
GLTexture* object = GLBackend::syncGPUObject(*resourceTexture);
GLTexture* object = GLBackend::syncGPUObject(resourceTexture);
if (object) {
GLuint to = object->_texture;
GLuint target = object->_target;

View file

@ -9,18 +9,79 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GPULogging.h"
#include <QtCore/QThread>
#include "GLBackendShared.h"
#include "GLTexelFormat.h"
#include "GLBackendTextureTransfer.h"
using namespace gpu;
GLBackend::GLTexture::GLTexture() :
_storageStamp(0),
_contentStamp(0),
_texture(0),
_target(GL_TEXTURE_2D),
_size(0)
GLenum gpuToGLTextureType(const Texture& texture) {
switch (texture.getType()) {
case Texture::TEX_2D:
return GL_TEXTURE_2D;
break;
case Texture::TEX_CUBE:
return GL_TEXTURE_CUBE_MAP;
break;
default:
qFatal("Unsupported texture type");
}
Q_UNREACHABLE();
return GL_TEXTURE_2D;
}
GLuint allocateSingleTexture() {
GLuint result;
glGenTextures(1, &result);
return result;
}
const GLenum GLBackend::GLTexture::CUBE_FACE_LAYOUT[6] = {
GL_TEXTURE_CUBE_MAP_POSITIVE_X, GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
GL_TEXTURE_CUBE_MAP_POSITIVE_Y, GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
GL_TEXTURE_CUBE_MAP_POSITIVE_Z, GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};
// Create the texture and allocate storage
GLBackend::GLTexture::GLTexture(const Texture& texture) :
_storageStamp(texture.getStamp()), _texture(allocateSingleTexture()),
_target(gpuToGLTextureType(texture)), _size((GLuint)texture.getSize()), _gpuTexture(texture)
{
Backend::incrementTextureGPUCount();
Backend::updateTextureGPUMemoryUsage(0, _size);
Backend::setGPUObject(texture, this);
GLsizei width = texture.getWidth();
GLsizei height = texture.getHeight();
GLsizei levels = 1;
if (texture.maxMip() > 0) {
if (texture.isAutogenerateMips()) {
while ((width | height) >> levels) {
++levels;
}
}
levels = std::max(1, std::min(texture.maxMip() + 1, levels));
}
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(texture.getTexelFormat());
withPreservedTexture(_target, [&] {
glBindTexture(_target, _texture);
(void)CHECK_GL_ERROR();
// GO through the process of allocating the correct storage
if (GLEW_VERSION_4_2) {
glTexStorage2D(_target, levels, texelFormat.internalFormat, width, height);
} else {
glTexImage2D(_target, 0, texelFormat.internalFormat, width, height, 0, texelFormat.format, texelFormat.type, 0);
}
(void)CHECK_GL_ERROR();
syncSampler(texture.getSampler(), texture.getType(), this);
(void)CHECK_GL_ERROR();
});
}
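
Because the constructor above allocates immutable storage with glTexStorage2D when GL 4.2 is available, the full mip chain size has to be known up front. The loop `while ((width | height) >> levels) ++levels;` keeps going until shifting both dimensions right by `levels` gives zero, i.e. levels = floor(log2(max(width, height))) + 1, and the result is then clamped to maxMip + 1. A quick worked check of that arithmetic:

```
#include <algorithm>
#include <cassert>

// Mirrors the mip-level computation in the GLTexture constructor above.
int mipLevelsFor(int width, int height, int maxMip, bool autogenerateMips) {
    int levels = 1;
    if (maxMip > 0) {
        if (autogenerateMips) {
            while ((width | height) >> levels) {
                ++levels;            // stop once both dimensions shift down to zero
            }
        }
        levels = std::max(1, std::min(maxMip + 1, levels));
    }
    return levels;
}

int main() {
    // 1024x512 needs 11 levels (1024, 512, 256, ..., 2, 1): floor(log2(1024)) + 1.
    assert(mipLevelsFor(1024, 512, 100, true) == 11);
    // A texture that declares no extra mips keeps a single level regardless of size.
    assert(mipLevelsFor(1024, 512, 0, true) == 1);
    return 0;
}
```
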
GLBackend::GLTexture::~GLTexture() {
@ -31,562 +92,163 @@ GLBackend::GLTexture::~GLTexture() {
Backend::decrementTextureGPUCount();
}
void GLBackend::GLTexture::setSize(GLuint size) {
Backend::updateTextureGPUMemoryUsage(_size, size);
_size = size;
bool GLBackend::GLTexture::isInvalid() const {
return _storageStamp < _gpuTexture.getStamp();
}
class GLTexelFormat {
public:
GLenum internalFormat;
GLenum format;
GLenum type;
bool GLBackend::GLTexture::isOutdated() const {
return _contentStamp < _gpuTexture.getDataStamp();
}
static GLTexelFormat evalGLTexelFormat(const Element& dstFormat, const Element& srcFormat) {
if (dstFormat != srcFormat) {
GLTexelFormat texel = {GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE};
switch(dstFormat.getDimension()) {
case gpu::SCALAR: {
texel.format = GL_RED;
texel.type = _elementTypeToGLType[dstFormat.getType()];
switch(dstFormat.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
texel.internalFormat = GL_RED;
break;
case gpu::DEPTH:
texel.internalFormat = GL_DEPTH_COMPONENT;
break;
case gpu::DEPTH_STENCIL:
texel.type = GL_UNSIGNED_INT_24_8;
texel.format = GL_DEPTH_STENCIL;
texel.internalFormat = GL_DEPTH24_STENCIL8;
break;
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
break;
}
case gpu::VEC2: {
texel.format = GL_RG;
texel.type = _elementTypeToGLType[dstFormat.getType()];
switch(dstFormat.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
texel.internalFormat = GL_RG;
break;
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
break;
}
case gpu::VEC3: {
texel.format = GL_RGB;
texel.type = _elementTypeToGLType[dstFormat.getType()];
switch(dstFormat.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
texel.internalFormat = GL_RGB;
break;
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
break;
}
case gpu::VEC4: {
texel.format = GL_RGBA;
texel.type = _elementTypeToGLType[dstFormat.getType()];
switch(srcFormat.getSemantic()) {
case gpu::BGRA:
case gpu::SBGRA:
texel.format = GL_BGRA;
break;
case gpu::RGB:
case gpu::RGBA:
case gpu::SRGB:
case gpu::SRGBA:
default:
break;
};
switch(dstFormat.getSemantic()) {
case gpu::RGB:
texel.internalFormat = GL_RGB;
break;
case gpu::RGBA:
texel.internalFormat = GL_RGBA;
break;
case gpu::SRGB:
texel.internalFormat = GL_SRGB;
break;
case gpu::SRGBA:
texel.internalFormat = GL_SRGB_ALPHA;
break;
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
break;
}
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
return texel;
} else {
GLTexelFormat texel = {GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE};
switch(dstFormat.getDimension()) {
case gpu::SCALAR: {
texel.format = GL_RED;
texel.type = _elementTypeToGLType[dstFormat.getType()];
switch(dstFormat.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
case gpu::SRGB:
case gpu::SRGBA:
texel.internalFormat = GL_RED;
switch (dstFormat.getType()) {
case gpu::UINT32: {
texel.internalFormat = GL_R32UI;
break;
}
case gpu::INT32: {
texel.internalFormat = GL_R32I;
break;
}
case gpu::NUINT32: {
texel.internalFormat = GL_RED;
break;
}
case gpu::NINT32: {
texel.internalFormat = GL_RED_SNORM;
break;
}
case gpu::FLOAT: {
texel.internalFormat = GL_R32F;
break;
}
case gpu::UINT16: {
texel.internalFormat = GL_R16UI;
break;
}
case gpu::INT16: {
texel.internalFormat = GL_R16I;
break;
}
case gpu::NUINT16: {
texel.internalFormat = GL_R16;
break;
}
case gpu::NINT16: {
texel.internalFormat = GL_R16_SNORM;
break;
}
case gpu::HALF: {
texel.internalFormat = GL_R16F;
break;
}
case gpu::UINT8: {
texel.internalFormat = GL_R8UI;
break;
}
case gpu::INT8: {
texel.internalFormat = GL_R8I;
break;
}
case gpu::NUINT8: {
if ((dstFormat.getSemantic() == gpu::SRGB || dstFormat.getSemantic() == gpu::SRGBA)) {
texel.internalFormat = GL_SLUMINANCE;
} else {
texel.internalFormat = GL_R8;
}
break;
}
case gpu::NINT8: {
texel.internalFormat = GL_R8_SNORM;
break;
}
case gpu::NUM_TYPES: { // quiet compiler
Q_UNREACHABLE();
}
}
break;
case gpu::R11G11B10:
texel.format = GL_RGB;
// the type should be float
texel.internalFormat = GL_R11F_G11F_B10F;
break;
case gpu::DEPTH:
texel.format = GL_DEPTH_COMPONENT; // It's depth component to load it
texel.internalFormat = GL_DEPTH_COMPONENT;
switch (dstFormat.getType()) {
case gpu::UINT32:
case gpu::INT32:
case gpu::NUINT32:
case gpu::NINT32: {
texel.internalFormat = GL_DEPTH_COMPONENT32;
break;
}
case gpu::FLOAT: {
texel.internalFormat = GL_DEPTH_COMPONENT32F;
break;
}
case gpu::UINT16:
case gpu::INT16:
case gpu::NUINT16:
case gpu::NINT16:
case gpu::HALF: {
texel.internalFormat = GL_DEPTH_COMPONENT16;
break;
}
case gpu::UINT8:
case gpu::INT8:
case gpu::NUINT8:
case gpu::NINT8: {
texel.internalFormat = GL_DEPTH_COMPONENT24;
break;
}
case gpu::NUM_TYPES: { // quiet compiler
Q_UNREACHABLE();
}
}
break;
case gpu::DEPTH_STENCIL:
texel.type = GL_UNSIGNED_INT_24_8;
texel.format = GL_DEPTH_STENCIL;
texel.internalFormat = GL_DEPTH24_STENCIL8;
break;
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
break;
}
case gpu::VEC2: {
texel.format = GL_RG;
texel.type = _elementTypeToGLType[dstFormat.getType()];
switch(dstFormat.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
texel.internalFormat = GL_RG;
break;
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
break;
}
case gpu::VEC3: {
texel.format = GL_RGB;
texel.type = _elementTypeToGLType[dstFormat.getType()];
switch(dstFormat.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
texel.internalFormat = GL_RGB;
break;
case gpu::SRGB:
case gpu::SRGBA:
texel.internalFormat = GL_SRGB; // standard 2.2 gamma correction color
break;
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
break;
}
case gpu::VEC4: {
texel.format = GL_RGBA;
texel.type = _elementTypeToGLType[dstFormat.getType()];
switch(dstFormat.getSemantic()) {
case gpu::RGB:
texel.internalFormat = GL_RGB;
break;
case gpu::RGBA:
texel.internalFormat = GL_RGBA;
switch (dstFormat.getType()) {
case gpu::UINT32:
texel.format = GL_RGBA_INTEGER;
texel.internalFormat = GL_RGBA32UI;
break;
case gpu::INT32:
texel.format = GL_RGBA_INTEGER;
texel.internalFormat = GL_RGBA32I;
break;
case gpu::FLOAT:
texel.internalFormat = GL_RGBA32F;
break;
case gpu::UINT16:
texel.format = GL_RGBA_INTEGER;
texel.internalFormat = GL_RGBA16UI;
break;
case gpu::INT16:
texel.format = GL_RGBA_INTEGER;
texel.internalFormat = GL_RGBA16I;
break;
case gpu::NUINT16:
texel.format = GL_RGBA;
texel.internalFormat = GL_RGBA16;
break;
case gpu::NINT16:
texel.format = GL_RGBA;
texel.internalFormat = GL_RGBA16_SNORM;
break;
case gpu::HALF:
texel.format = GL_RGBA;
texel.internalFormat = GL_RGBA16F;
break;
case gpu::UINT8:
texel.format = GL_RGBA_INTEGER;
texel.internalFormat = GL_RGBA8UI;
break;
case gpu::INT8:
texel.format = GL_RGBA_INTEGER;
texel.internalFormat = GL_RGBA8I;
break;
case gpu::NUINT8:
texel.format = GL_RGBA;
texel.internalFormat = GL_RGBA8;
break;
case gpu::NINT8:
texel.format = GL_RGBA;
texel.internalFormat = GL_RGBA8_SNORM;
break;
case gpu::NUINT32:
case gpu::NINT32:
case gpu::NUM_TYPES: // quiet compiler
Q_UNREACHABLE();
}
break;
case gpu::SRGB:
texel.internalFormat = GL_SRGB;
break;
case gpu::SRGBA:
texel.internalFormat = GL_SRGB_ALPHA; // standard 2.2 gamma correction color
break;
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
break;
}
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
return texel;
}
bool GLBackend::GLTexture::isReady() const {
// If we have an invalid texture, we're never ready
if (isInvalid()) {
return false;
}
};
// If we're out of date, but the transfer is in progress, report ready
// as a special case
auto syncState = _syncState.load();
GLBackend::GLTexture* GLBackend::syncGPUObject(const Texture& texture) {
GLTexture* object = Backend::getGPUObject<GLBackend::GLTexture>(texture);
if (isOutdated()) {
return Pending == syncState;
}
// If GPU object already created and in sync
bool needUpdate = false;
if (object && (object->_storageStamp == texture.getStamp())) {
// If gpu object info is in sync with sysmem version
if (object->_contentStamp >= texture.getDataStamp()) {
// Then all good, GPU object is ready to be used
return object;
} else {
// Need to update the content of the GPU object from the source sysmem of the texture
needUpdate = true;
}
} else if (!texture.isDefined()) {
return Idle == syncState;
}
//#define USE_PBO
// Move content bits from the CPU to the GPU for a given mip / face
void GLBackend::GLTexture::transferMip(GLenum target, const Texture::PixelsPointer& mip) const {
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuTexture.getTexelFormat(), mip->getFormat());
#ifdef USE_PBO
GLuint pixelBufferID;
glGenBuffers(1, &pixelBufferID);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pixelBufferID);
//if (GLEW_VERSION_4_4) {
// glBufferStorage(GL_PIXEL_UNPACK_BUFFER, mip->getSize(), nullptr, GL_STREAM_DRAW);
//} else {
glBufferData(GL_PIXEL_UNPACK_BUFFER, mip->getSize(), nullptr, GL_STREAM_DRAW);
//}
void* mappedBuffer = glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_WRITE_ONLY);
memcpy(mappedBuffer, mip->readData(), mip->getSize());
//// use while PBO is still bound, assumes GL_TEXTURE_2D and offset 0
glTexSubImage2D(target, 0, 0, 0, _gpuTexture.getWidth(), _gpuTexture.getHeight(), texelFormat.format, texelFormat.type, 0);
glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
glDeleteBuffers(1, &pixelBufferID);
#else
//glTexImage2D(target, 0, internalFormat, texture.getWidth(), texture.getHeight(), 0, texelFormat.format, texelFormat.type, bytes);
glTexSubImage2D(target, 0, 0, 0, _gpuTexture.getWidth(), _gpuTexture.getHeight(), texelFormat.format, texelFormat.type, mip->readData());
(void)CHECK_GL_ERROR();
#endif
}
// Move content bits from the CPU to the GPU
void GLBackend::GLTexture::transfer() const {
PROFILE_RANGE(__FUNCTION__);
qDebug() << "Transferring texture: " << _texture;
// Need to update the content of the GPU object from the source sysmem of the texture
if (_contentStamp >= _gpuTexture.getDataStamp()) {
return;
}
glBindTexture(_target, _texture);
// GO through the process of allocating the correct storage and/or update the content
switch (_gpuTexture.getType()) {
case Texture::TEX_2D:
if (_gpuTexture.isStoredMipFaceAvailable(0)) {
transferMip(GL_TEXTURE_2D, _gpuTexture.accessStoredMipFace(0));
}
break;
case Texture::TEX_CUBE:
// transfer pixels from each faces
for (uint8_t f = 0; f < CUBE_NUM_FACES; f++) {
if (_gpuTexture.isStoredMipFaceAvailable(0, f)) {
transferMip(CUBE_FACE_LAYOUT[f], _gpuTexture.accessStoredMipFace(0, f));
}
}
break;
default:
qCWarning(gpulogging) << __FUNCTION__ << " case for Texture Type " << _gpuTexture.getType() << " not supported";
break;
}
if (_gpuTexture.isAutogenerateMips()) {
glGenerateMipmap(_target);
(void)CHECK_GL_ERROR();
}
}
// Do any post-transfer operations that might be required on the main context / rendering thread
void GLBackend::GLTexture::postTransfer() {
setSyncState(GLTexture::Idle);
// At this point the mip pixels have been loaded, we can notify the gpu texture to abandon its memory
switch (_gpuTexture.getType()) {
case Texture::TEX_2D:
_gpuTexture.notifyMipFaceGPULoaded(0, 0);
break;
case Texture::TEX_CUBE:
for (uint8_t f = 0; f < CUBE_NUM_FACES; ++f) {
_gpuTexture.notifyMipFaceGPULoaded(0, f);
}
break;
default:
qCWarning(gpulogging) << __FUNCTION__ << " case for Texture Type " << _gpuTexture.getType() << " not supported";
break;
}
}
GLBackend::GLTexture* GLBackend::syncGPUObject(const TexturePointer& texturePointer) {
const Texture& texture = *texturePointer;
if (!texture.isDefined()) {
// NO texture definition yet so let's avoid thinking
return nullptr;
}
// If the object hasn't been created, or the object definition is out of date, drop and re-create
GLTexture* object = Backend::getGPUObject<GLBackend::GLTexture>(texture);
if (object && object->isReady()) {
return object;
}
// Object isn't ready, check what we need to do...
// Create the texture if need be (force re-creation if the storage stamp changes
// for easier use of immutable storage)
if (!object || object->isInvalid()) {
// This automatically destroys the old texture
object = new GLTexture(texture);
}
// need to have a gpu object?
if (!object) {
object = new GLTexture();
glGenTextures(1, &object->_texture);
(void) CHECK_GL_ERROR();
Backend::setGPUObject(texture, object);
if (texture.getNumSlices() != 1) {
return object;
}
// GO through the process of allocating the correct storage and/or update the content
switch (texture.getType()) {
case Texture::TEX_2D: {
if (texture.getNumSlices() == 1) {
GLint boundTex = -1;
glGetIntegerv(GL_TEXTURE_BINDING_2D, &boundTex);
glBindTexture(GL_TEXTURE_2D, object->_texture);
if (needUpdate) {
if (texture.isStoredMipFaceAvailable(0)) {
Texture::PixelsPointer mip = texture.accessStoredMipFace(0);
const GLvoid* bytes = mip->readData();
Element srcFormat = mip->getFormat();
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(texture.getTexelFormat(), srcFormat);
glBindTexture(GL_TEXTURE_2D, object->_texture);
glTexSubImage2D(GL_TEXTURE_2D, 0,
texelFormat.internalFormat, texture.getWidth(), texture.getHeight(), 0,
texelFormat.format, texelFormat.type, bytes);
if (texture.isAutogenerateMips()) {
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
}
object->_target = GL_TEXTURE_2D;
syncSampler(texture.getSampler(), texture.getType(), object);
// At this point the mip pixels have been loaded, we can notify
texture.notifyMipFaceGPULoaded(0, 0);
object->_contentStamp = texture.getDataStamp();
}
} else {
const GLvoid* bytes = 0;
Element srcFormat = texture.getTexelFormat();
if (texture.isStoredMipFaceAvailable(0)) {
Texture::PixelsPointer mip = texture.accessStoredMipFace(0);
bytes = mip->readData();
srcFormat = mip->getFormat();
object->_contentStamp = texture.getDataStamp();
}
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(texture.getTexelFormat(), srcFormat);
glTexImage2D(GL_TEXTURE_2D, 0,
texelFormat.internalFormat, texture.getWidth(), texture.getHeight(), 0,
texelFormat.format, texelFormat.type, bytes);
if (bytes && texture.isAutogenerateMips()) {
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
}
object->_target = GL_TEXTURE_2D;
syncSampler(texture.getSampler(), texture.getType(), object);
// At this point the mip pixels have been loaded, we can notify
texture.notifyMipFaceGPULoaded(0, 0);
object->_storageStamp = texture.getStamp();
object->_contentStamp = texture.getDataStamp();
object->setSize((GLuint)texture.getSize());
}
glBindTexture(GL_TEXTURE_2D, boundTex);
}
break;
// Object might be outdated; if so, start the transfer
// (outdated objects that are already in transfer will have reported 'true' for ready())
if (object->isOutdated()) {
_textureTransferHelper->transferTexture(texturePointer);
}
case Texture::TEX_CUBE: {
if (texture.getNumSlices() == 1) {
GLint boundTex = -1;
glGetIntegerv(GL_TEXTURE_BINDING_CUBE_MAP, &boundTex);
glBindTexture(GL_TEXTURE_CUBE_MAP, object->_texture);
const int NUM_FACES = 6;
const GLenum FACE_LAYOUT[] = {
GL_TEXTURE_CUBE_MAP_POSITIVE_X, GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
GL_TEXTURE_CUBE_MAP_POSITIVE_Y, GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
GL_TEXTURE_CUBE_MAP_POSITIVE_Z, GL_TEXTURE_CUBE_MAP_NEGATIVE_Z };
if (needUpdate) {
glBindTexture(GL_TEXTURE_CUBE_MAP, object->_texture);
// transfer pixels from each faces
for (int f = 0; f < NUM_FACES; f++) {
if (texture.isStoredMipFaceAvailable(0, f)) {
Texture::PixelsPointer mipFace = texture.accessStoredMipFace(0, f);
Element srcFormat = mipFace->getFormat();
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(texture.getTexelFormat(), srcFormat);
glTexSubImage2D(FACE_LAYOUT[f], 0, texelFormat.internalFormat, texture.getWidth(), texture.getWidth(), 0,
texelFormat.format, texelFormat.type, (GLvoid*) (mipFace->readData()));
// At this point the mip pixels have been loaded, we can notify
texture.notifyMipFaceGPULoaded(0, f);
}
}
if (texture.isAutogenerateMips()) {
glGenerateMipmap(GL_TEXTURE_CUBE_MAP);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
}
object->_target = GL_TEXTURE_CUBE_MAP;
syncSampler(texture.getSampler(), texture.getType(), object);
object->_contentStamp = texture.getDataStamp();
} else {
glBindTexture(GL_TEXTURE_CUBE_MAP, object->_texture);
// transfer pixels from each faces
for (int f = 0; f < NUM_FACES; f++) {
if (texture.isStoredMipFaceAvailable(0, f)) {
Texture::PixelsPointer mipFace = texture.accessStoredMipFace(0, f);
Element srcFormat = mipFace->getFormat();
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(texture.getTexelFormat(), srcFormat);
glTexImage2D(FACE_LAYOUT[f], 0, texelFormat.internalFormat, texture.getWidth(), texture.getWidth(), 0,
texelFormat.format, texelFormat.type, (GLvoid*) (mipFace->readData()));
// At this point the mip pixels have been loaded, we can notify
texture.notifyMipFaceGPULoaded(0, f);
}
}
if (texture.isAutogenerateMips()) {
glGenerateMipmap(GL_TEXTURE_CUBE_MAP);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
} else {
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
}
object->_target = GL_TEXTURE_CUBE_MAP;
syncSampler(texture.getSampler(), texture.getType(), object);
object->_storageStamp = texture.getStamp();
object->_contentStamp = texture.getDataStamp();
object->setSize((GLuint)texture.getSize());
}
glBindTexture(GL_TEXTURE_CUBE_MAP, boundTex);
}
break;
if (GLTexture::Transferred == object->getSyncState()) {
object->postTransfer();
}
default:
qCDebug(gpulogging) << "GLBackend::syncGPUObject(const Texture&) case for Texture Type " << texture.getType() << " not supported";
}
(void) CHECK_GL_ERROR();
return object;
}
std::shared_ptr<GLTextureTransferHelper> GLBackend::_textureTransferHelper;
void GLBackend::initTextureTransferHelper() {
_textureTransferHelper = std::make_shared<GLTextureTransferHelper>();
}
GLuint GLBackend::getTextureID(const TexturePointer& texture, bool sync) {
if (!texture) {
@ -594,7 +256,7 @@ GLuint GLBackend::getTextureID(const TexturePointer& texture, bool sync) {
}
GLTexture* object { nullptr };
if (sync) {
object = GLBackend::syncGPUObject(*texture);
object = GLBackend::syncGPUObject(texture);
} else {
object = Backend::getGPUObject<GLBackend::GLTexture>(*texture);
}
@ -605,38 +267,37 @@ GLuint GLBackend::getTextureID(const TexturePointer& texture, bool sync) {
}
}
void GLBackend::syncSampler(const Sampler& sampler, Texture::Type type, GLTexture* object) {
void GLBackend::syncSampler(const Sampler& sampler, Texture::Type type, const GLTexture* object) {
if (!object) return;
if (!object->_texture) return;
class GLFilterMode {
public:
GLint minFilter;
GLint magFilter;
};
static const GLFilterMode filterModes[] = {
{GL_NEAREST, GL_NEAREST}, //FILTER_MIN_MAG_POINT,
{GL_NEAREST, GL_LINEAR}, //FILTER_MIN_POINT_MAG_LINEAR,
{GL_LINEAR, GL_NEAREST}, //FILTER_MIN_LINEAR_MAG_POINT,
{GL_LINEAR, GL_LINEAR}, //FILTER_MIN_MAG_LINEAR,
{GL_NEAREST_MIPMAP_NEAREST, GL_NEAREST}, //FILTER_MIN_MAG_MIP_POINT,
{GL_NEAREST_MIPMAP_NEAREST, GL_NEAREST}, //FILTER_MIN_MAG_MIP_POINT,
{GL_NEAREST_MIPMAP_LINEAR, GL_NEAREST}, //FILTER_MIN_MAG_POINT_MIP_LINEAR,
{GL_NEAREST_MIPMAP_NEAREST, GL_LINEAR}, //FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT,
{GL_NEAREST_MIPMAP_LINEAR, GL_LINEAR}, //FILTER_MIN_POINT_MAG_MIP_LINEAR,
{GL_LINEAR_MIPMAP_NEAREST, GL_NEAREST}, //FILTER_MIN_LINEAR_MAG_MIP_POINT,
{GL_LINEAR_MIPMAP_LINEAR, GL_NEAREST}, //FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR,
{GL_LINEAR_MIPMAP_NEAREST, GL_LINEAR}, //FILTER_MIN_MAG_LINEAR_MIP_POINT,
{GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR}, //FILTER_MIN_MAG_MIP_LINEAR,
{GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR} //FILTER_ANISOTROPIC,
static const GLFilterMode filterModes[] = {
{ GL_NEAREST, GL_NEAREST }, //FILTER_MIN_MAG_POINT,
{ GL_NEAREST, GL_LINEAR }, //FILTER_MIN_POINT_MAG_LINEAR,
{ GL_LINEAR, GL_NEAREST }, //FILTER_MIN_LINEAR_MAG_POINT,
{ GL_LINEAR, GL_LINEAR }, //FILTER_MIN_MAG_LINEAR,
{ GL_NEAREST_MIPMAP_NEAREST, GL_NEAREST }, //FILTER_MIN_MAG_MIP_POINT,
{ GL_NEAREST_MIPMAP_NEAREST, GL_NEAREST }, //FILTER_MIN_MAG_MIP_POINT,
{ GL_NEAREST_MIPMAP_LINEAR, GL_NEAREST }, //FILTER_MIN_MAG_POINT_MIP_LINEAR,
{ GL_NEAREST_MIPMAP_NEAREST, GL_LINEAR }, //FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT,
{ GL_NEAREST_MIPMAP_LINEAR, GL_LINEAR }, //FILTER_MIN_POINT_MAG_MIP_LINEAR,
{ GL_LINEAR_MIPMAP_NEAREST, GL_NEAREST }, //FILTER_MIN_LINEAR_MAG_MIP_POINT,
{ GL_LINEAR_MIPMAP_LINEAR, GL_NEAREST }, //FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR,
{ GL_LINEAR_MIPMAP_NEAREST, GL_LINEAR }, //FILTER_MIN_MAG_LINEAR_MIP_POINT,
{ GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR }, //FILTER_MIN_MAG_MIP_LINEAR,
{ GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR } //FILTER_ANISOTROPIC,
};
auto fm = filterModes[sampler.getFilter()];
glTexParameteri(object->_target, GL_TEXTURE_MIN_FILTER, fm.minFilter);
glTexParameteri(object->_target, GL_TEXTURE_MAG_FILTER, fm.magFilter);
static const GLenum comparisonFuncs[] = {
static const GLenum comparisonFuncs[] = {
GL_NEVER,
GL_LESS,
GL_EQUAL,
@ -653,7 +314,7 @@ void GLBackend::syncSampler(const Sampler& sampler, Texture::Type type, GLTextur
glTexParameteri(object->_target, GL_TEXTURE_COMPARE_MODE, GL_NONE);
}
static const GLenum wrapModes[] = {
static const GLenum wrapModes[] = {
GL_REPEAT, // WRAP_REPEAT,
GL_MIRRORED_REPEAT, // WRAP_MIRROR,
GL_CLAMP_TO_EDGE, // WRAP_CLAMP,
@ -664,23 +325,20 @@ void GLBackend::syncSampler(const Sampler& sampler, Texture::Type type, GLTextur
glTexParameteri(object->_target, GL_TEXTURE_WRAP_T, wrapModes[sampler.getWrapModeV()]);
glTexParameteri(object->_target, GL_TEXTURE_WRAP_R, wrapModes[sampler.getWrapModeW()]);
glTexParameterfv(object->_target, GL_TEXTURE_BORDER_COLOR, (const float*) &sampler.getBorderColor());
glTexParameterfv(object->_target, GL_TEXTURE_BORDER_COLOR, (const float*)&sampler.getBorderColor());
glTexParameteri(object->_target, GL_TEXTURE_BASE_LEVEL, sampler.getMipOffset());
glTexParameterf(object->_target, GL_TEXTURE_MIN_LOD, (float) sampler.getMinMip());
glTexParameterf(object->_target, GL_TEXTURE_MIN_LOD, (float)sampler.getMinMip());
glTexParameterf(object->_target, GL_TEXTURE_MAX_LOD, (sampler.getMaxMip() == Sampler::MAX_MIP_LEVEL ? 1000.f : sampler.getMaxMip()));
glTexParameterf(object->_target, GL_TEXTURE_MAX_ANISOTROPY_EXT, sampler.getMaxAnisotropy());
}
void GLBackend::do_generateTextureMips(Batch& batch, size_t paramOffset) {
TexturePointer resourceTexture = batch._textures.get(batch._params[paramOffset + 0]._uint);
if (!resourceTexture) {
return;
}
GLTexture* object = GLBackend::syncGPUObject(*resourceTexture);
GLTexture* object = GLBackend::syncGPUObject(resourceTexture);
if (!object) {
return;
}
@ -695,7 +353,7 @@ void GLBackend::do_generateTextureMips(Batch& batch, size_t paramOffset) {
if (freeSlot < 0) {
// If we had to use slot 0 then restore state
GLTexture* boundObject = GLBackend::syncGPUObject(*_resource._textures[0]);
GLTexture* boundObject = GLBackend::syncGPUObject(_resource._textures[0]);
if (boundObject) {
glBindTexture(boundObject->_target, boundObject->_texture);
}

View file

@ -0,0 +1,132 @@
//
// Created by Bradley Austin Davis on 2016/04/03
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackendTextureTransfer.h"
#include "GLBackendShared.h"
#include "GLTexelFormat.h"
#ifdef THREADED_TEXTURE_TRANSFER
#include <gl/OffscreenGLCanvas.h>
#include <gl/QOpenGLContextWrapper.h>
//#define FORCE_DRAW_AFTER_TRANSFER
#ifdef FORCE_DRAW_AFTER_TRANSFER
#include <gl/OglplusHelpers.h>
static ProgramPtr _program;
static ProgramPtr _cubeProgram;
static ShapeWrapperPtr _plane;
static ShapeWrapperPtr _skybox;
static BasicFramebufferWrapperPtr _framebuffer;
#endif
#endif
using namespace gpu;
GLTextureTransferHelper::GLTextureTransferHelper() {
#ifdef THREADED_TEXTURE_TRANSFER
_canvas = std::make_shared<OffscreenGLCanvas>();
_canvas->create(QOpenGLContextWrapper::currentContext());
if (!_canvas->makeCurrent()) {
qFatal("Unable to create texture transfer context");
}
_canvas->doneCurrent();
initialize(true, QThread::LowPriority);
_canvas->moveToThreadWithContext(_thread);
#endif
}
void GLTextureTransferHelper::transferTexture(const gpu::TexturePointer& texturePointer) {
GLBackend::GLTexture* object = Backend::getGPUObject<GLBackend::GLTexture>(*texturePointer);
#ifdef THREADED_TEXTURE_TRANSFER
TextureTransferPackage package { texturePointer, glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0) };
glFlush();
object->setSyncState(GLBackend::GLTexture::Pending);
queueItem(package);
#else
object->transfer();
object->postTransfer();
#endif
}
void GLTextureTransferHelper::setup() {
#ifdef THREADED_TEXTURE_TRANSFER
_canvas->makeCurrent();
#ifdef FORCE_DRAW_AFTER_TRANSFER
_program = loadDefaultShader();
_plane = loadPlane(_program);
_cubeProgram = loadCubemapShader();
_skybox = loadSkybox(_cubeProgram);
_framebuffer = std::make_shared<BasicFramebufferWrapper>();
_framebuffer->Init({ 100, 100 });
_framebuffer->fbo.Bind(oglplus::FramebufferTarget::Draw);
#endif
#endif
}
void GLTextureTransferHelper::shutdown() {
_canvas->doneCurrent();
_canvas->moveToThreadWithContext(qApp->thread());
}
bool GLTextureTransferHelper::processQueueItems(const Queue& messages) {
for (auto package : messages) {
glWaitSync(package.fence, 0, GL_TIMEOUT_IGNORED);
glDeleteSync(package.fence);
TexturePointer texturePointer = package.texture.lock();
// Texture no longer exists, move on to the next
if (!texturePointer) {
continue;
}
GLBackend::GLTexture* object = Backend::getGPUObject<GLBackend::GLTexture>(*texturePointer);
object->transfer();
#ifdef FORCE_DRAW_AFTER_TRANSFER
// Now force a draw using the texture
try {
switch (texturePointer->getType()) {
case Texture::TEX_2D:
_program->Use();
_plane->Use();
_plane->Draw();
break;
case Texture::TEX_CUBE:
_cubeProgram->Use();
_skybox->Use();
_skybox->Draw();
break;
default:
qCWarning(gpulogging) << __FUNCTION__ << " case for Texture Type " << texturePointer->getType() << " not supported";
break;
}
} catch (const std::runtime_error& error) {
qWarning() << "Failed to render texture on background thread: " << error.what();
}
#endif
glBindTexture(object->_target, 0);
auto writeSync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
glClientWaitSync(writeSync, GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
glDeleteSync(writeSync);
object->_contentStamp = texturePointer->getDataStamp();
object->setSyncState(GLBackend::GLTexture::Transferred);
}
return true;
}
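A minimal sketch of the fence handshake used by transferTexture() and processQueueItems() above, with illustrative function names that are not part of the codebase. The producer context fences its pending GL work and flushes so the fence reaches the server; the transfer context waits on that fence, performs the upload, then client-waits on a second fence so the finished texture is observable before the render thread samples it.

```cpp
// Assumes the same GL headers/loader used elsewhere in the backend.
#include <functional>

// Producer (render) thread: publish pending GL work to another context.
GLsync publishPendingWork() {
    GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    glFlush(); // make sure the fence command actually reaches the GL server
    return fence;
}

// Transfer thread: wait for the producer's work, upload, then block until the
// upload itself has completed.
void consumePendingWork(GLsync producerFence, const std::function<void()>& doUpload) {
    glWaitSync(producerFence, 0, GL_TIMEOUT_IGNORED);
    glDeleteSync(producerFence);

    doUpload(); // e.g. object->transfer() in the helper above

    GLsync writeSync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    glClientWaitSync(writeSync, GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
    glDeleteSync(writeSync);
}
```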

View file

@ -0,0 +1,62 @@
//
// Created by Bradley Austin Davis on 2016/04/03
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <GenericQueueThread.h>
#include "GLBackendShared.h"
#define THREADED_TEXTURE_TRANSFER
class OffscreenGLCanvas;
namespace gpu {
struct TextureTransferPackage {
std::weak_ptr<Texture> texture;
GLsync fence;
};
class GLTextureTransferHelper : public GenericQueueThread<TextureTransferPackage> {
public:
GLTextureTransferHelper();
void transferTexture(const gpu::TexturePointer& texturePointer);
void postTransfer(const gpu::TexturePointer& texturePointer);
protected:
void setup() override;
void shutdown() override;
bool processQueueItems(const Queue& messages) override;
void transferTextureSynchronous(const gpu::Texture& texture);
private:
std::shared_ptr<OffscreenGLCanvas> _canvas;
};
template <typename F>
void withPreservedTexture(GLenum target, F f) {
GLint boundTex = -1;
switch (target) {
case GL_TEXTURE_2D:
glGetIntegerv(GL_TEXTURE_BINDING_2D, &boundTex);
break;
case GL_TEXTURE_CUBE_MAP:
glGetIntegerv(GL_TEXTURE_BINDING_CUBE_MAP, &boundTex);
break;
default:
qFatal("Unsupported texture type");
}
(void)CHECK_GL_ERROR();
f();
glBindTexture(target, boundTex);
(void)CHECK_GL_ERROR();
}
}
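The withPreservedTexture helper above snapshots the current binding for the given target, runs the callable, and restores the binding afterwards. A hypothetical caller (not in the diff), to show the intended shape of use:

```cpp
// Upload into an existing 2D texture without disturbing the caller's bindings.
void updateSubImage(GLuint textureId, const void* pixels, GLsizei width, GLsizei height) {
    gpu::withPreservedTexture(GL_TEXTURE_2D, [&] {
        glBindTexture(GL_TEXTURE_2D, textureId);
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height,
                        GL_RGBA, GL_UNSIGNED_BYTE, pixels);
    });
    // The previously bound GL_TEXTURE_2D object is bound again at this point.
}
```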

View file

@ -0,0 +1,375 @@
//
// Created by Bradley Austin Davis on 2016/04/03
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackendShared.h"
class GLTexelFormat {
public:
GLenum internalFormat;
GLenum format;
GLenum type;
static GLTexelFormat evalGLTexelFormat(const gpu::Element& dstFormat) {
return evalGLTexelFormat(dstFormat, dstFormat);
}
static GLTexelFormat evalGLTexelFormat(const gpu::Element& dstFormat, const gpu::Element& srcFormat) {
using namespace gpu;
if (dstFormat != srcFormat) {
GLTexelFormat texel = { GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE };
switch (dstFormat.getDimension()) {
case gpu::SCALAR: {
texel.format = GL_RED;
texel.type = _elementTypeToGLType[dstFormat.getType()];
switch (dstFormat.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
texel.internalFormat = GL_R8;
break;
case gpu::DEPTH:
texel.internalFormat = GL_DEPTH_COMPONENT32;
break;
case gpu::DEPTH_STENCIL:
texel.type = GL_UNSIGNED_INT_24_8;
texel.format = GL_DEPTH_STENCIL;
texel.internalFormat = GL_DEPTH24_STENCIL8;
break;
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
break;
}
case gpu::VEC2: {
texel.format = GL_RG;
texel.type = _elementTypeToGLType[dstFormat.getType()];
switch (dstFormat.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
texel.internalFormat = GL_RG8;
break;
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
break;
}
case gpu::VEC3: {
texel.format = GL_RGB;
texel.type = _elementTypeToGLType[dstFormat.getType()];
switch (dstFormat.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
texel.internalFormat = GL_RGB8;
break;
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
break;
}
case gpu::VEC4: {
texel.format = GL_RGBA;
texel.type = _elementTypeToGLType[dstFormat.getType()];
switch (srcFormat.getSemantic()) {
case gpu::BGRA:
case gpu::SBGRA:
texel.format = GL_BGRA;
break;
case gpu::RGB:
case gpu::RGBA:
case gpu::SRGB:
case gpu::SRGBA:
default:
break;
};
switch (dstFormat.getSemantic()) {
case gpu::RGB:
texel.internalFormat = GL_RGB8;
break;
case gpu::RGBA:
texel.internalFormat = GL_RGBA8;
break;
case gpu::SRGB:
texel.internalFormat = GL_SRGB8;
break;
case gpu::SRGBA:
texel.internalFormat = GL_SRGB8_ALPHA8;
break;
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
break;
}
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
return texel;
} else {
GLTexelFormat texel = { GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE };
switch (dstFormat.getDimension()) {
case gpu::SCALAR: {
texel.format = GL_RED;
texel.type = _elementTypeToGLType[dstFormat.getType()];
switch (dstFormat.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
case gpu::SRGB:
case gpu::SRGBA:
texel.internalFormat = GL_R8;
switch (dstFormat.getType()) {
case gpu::UINT32: {
texel.internalFormat = GL_R32UI;
break;
}
case gpu::INT32: {
texel.internalFormat = GL_R32I;
break;
}
case gpu::NUINT32: {
texel.internalFormat = GL_R8;
break;
}
case gpu::NINT32: {
texel.internalFormat = GL_R8_SNORM;
break;
}
case gpu::FLOAT: {
texel.internalFormat = GL_R32F;
break;
}
case gpu::UINT16: {
texel.internalFormat = GL_R16UI;
break;
}
case gpu::INT16: {
texel.internalFormat = GL_R16I;
break;
}
case gpu::NUINT16: {
texel.internalFormat = GL_R16;
break;
}
case gpu::NINT16: {
texel.internalFormat = GL_R16_SNORM;
break;
}
case gpu::HALF: {
texel.internalFormat = GL_R16F;
break;
}
case gpu::UINT8: {
texel.internalFormat = GL_R8UI;
break;
}
case gpu::INT8: {
texel.internalFormat = GL_R8I;
break;
}
case gpu::NUINT8: {
if ((dstFormat.getSemantic() == gpu::SRGB || dstFormat.getSemantic() == gpu::SRGBA)) {
texel.internalFormat = GL_SLUMINANCE8;
} else {
texel.internalFormat = GL_R8;
}
break;
}
case gpu::NINT8: {
texel.internalFormat = GL_R8_SNORM;
break;
}
case gpu::NUM_TYPES: { // quiet compiler
Q_UNREACHABLE();
}
}
break;
case gpu::R11G11B10:
texel.format = GL_RGB;
// the type should be float
texel.internalFormat = GL_R11F_G11F_B10F;
break;
case gpu::DEPTH:
texel.format = GL_DEPTH_COMPONENT; // loaded as a depth component
texel.internalFormat = GL_DEPTH_COMPONENT32;
switch (dstFormat.getType()) {
case gpu::UINT32:
case gpu::INT32:
case gpu::NUINT32:
case gpu::NINT32: {
texel.internalFormat = GL_DEPTH_COMPONENT32;
break;
}
case gpu::FLOAT: {
texel.internalFormat = GL_DEPTH_COMPONENT32F;
break;
}
case gpu::UINT16:
case gpu::INT16:
case gpu::NUINT16:
case gpu::NINT16:
case gpu::HALF: {
texel.internalFormat = GL_DEPTH_COMPONENT16;
break;
}
case gpu::UINT8:
case gpu::INT8:
case gpu::NUINT8:
case gpu::NINT8: {
texel.internalFormat = GL_DEPTH_COMPONENT24;
break;
}
case gpu::NUM_TYPES: { // quiet compiler
Q_UNREACHABLE();
}
}
break;
case gpu::DEPTH_STENCIL:
texel.type = GL_UNSIGNED_INT_24_8;
texel.format = GL_DEPTH_STENCIL;
texel.internalFormat = GL_DEPTH24_STENCIL8;
break;
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
break;
}
case gpu::VEC2: {
texel.format = GL_RG;
texel.type = _elementTypeToGLType[dstFormat.getType()];
switch (dstFormat.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
texel.internalFormat = GL_RG8;
break;
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
break;
}
case gpu::VEC3: {
texel.format = GL_RGB;
texel.type = _elementTypeToGLType[dstFormat.getType()];
switch (dstFormat.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
texel.internalFormat = GL_RGB8;
break;
case gpu::SRGB:
case gpu::SRGBA:
texel.internalFormat = GL_SRGB8; // standard 2.2 gamma correction color
break;
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
break;
}
case gpu::VEC4: {
texel.format = GL_RGBA;
texel.type = _elementTypeToGLType[dstFormat.getType()];
switch (dstFormat.getSemantic()) {
case gpu::RGB:
texel.internalFormat = GL_RGB8;
break;
case gpu::RGBA:
texel.internalFormat = GL_RGBA8;
switch (dstFormat.getType()) {
case gpu::UINT32:
texel.format = GL_RGBA_INTEGER;
texel.internalFormat = GL_RGBA32UI;
break;
case gpu::INT32:
texel.format = GL_RGBA_INTEGER;
texel.internalFormat = GL_RGBA32I;
break;
case gpu::FLOAT:
texel.internalFormat = GL_RGBA32F;
break;
case gpu::UINT16:
texel.format = GL_RGBA_INTEGER;
texel.internalFormat = GL_RGBA16UI;
break;
case gpu::INT16:
texel.format = GL_RGBA_INTEGER;
texel.internalFormat = GL_RGBA16I;
break;
case gpu::NUINT16:
texel.format = GL_RGBA;
texel.internalFormat = GL_RGBA16;
break;
case gpu::NINT16:
texel.format = GL_RGBA;
texel.internalFormat = GL_RGBA16_SNORM;
break;
case gpu::HALF:
texel.format = GL_RGBA;
texel.internalFormat = GL_RGBA16F;
break;
case gpu::UINT8:
texel.format = GL_RGBA_INTEGER;
texel.internalFormat = GL_RGBA8UI;
break;
case gpu::INT8:
texel.format = GL_RGBA_INTEGER;
texel.internalFormat = GL_RGBA8I;
break;
case gpu::NUINT8:
texel.format = GL_RGBA;
texel.internalFormat = GL_RGBA8;
break;
case gpu::NINT8:
texel.format = GL_RGBA;
texel.internalFormat = GL_RGBA8_SNORM;
break;
case gpu::NUINT32:
case gpu::NINT32:
case gpu::NUM_TYPES: // quiet compiler
Q_UNREACHABLE();
}
break;
case gpu::SRGB:
texel.internalFormat = GL_SRGB8;
break;
case gpu::SRGBA:
texel.internalFormat = GL_SRGB8_ALPHA8; // standard 2.2 gamma correction color
break;
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
break;
}
default:
qCDebug(gpulogging) << "Unknown combination of texel format";
}
return texel;
}
}
};
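A hypothetical usage sketch (not part of the diff), assuming gpu::Element is constructed as (dimension, type, semantic) as elsewhere in the gpu library:

```cpp
// Resolve the GL upload parameters for an 8-bit sRGBA color texture.
void uploadSRGBA(GLsizei width, GLsizei height, const void* pixels) {
    gpu::Element srgbaColor(gpu::VEC4, gpu::NUINT8, gpu::SRGBA);
    GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(srgbaColor);
    // With identical src/dst formats this resolves to
    // internalFormat == GL_SRGB8_ALPHA8 and format == GL_RGBA.
    glTexImage2D(GL_TEXTURE_2D, 0, texelFormat.internalFormat, width, height, 0,
                 texelFormat.format, texelFormat.type, pixels);
}
```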

View file

@ -10,6 +10,7 @@
//
#include "ModelCache.h"
#include <Finally.h>
#include <FSTReader.h>
#include "FBXReader.h"
#include "OBJReader.h"
@ -117,11 +118,12 @@ void GeometryReader::run() {
originalPriority = QThread::NormalPriority;
}
QThread::currentThread()->setPriority(QThread::LowPriority);
Finally setPriorityBackToNormal([originalPriority]() {
QThread::currentThread()->setPriority(originalPriority);
});
// Ensure the resource is still being requested
auto resource = _resource.toStrongRef();
if (!resource) {
qCWarning(modelnetworking) << "Abandoning load of" << _url << "; could not get strong ref";
if (!_resource.data()) {
qCWarning(modelnetworking) << "Abandoning load of" << _url << "; resource was deleted";
return;
}
@ -146,17 +148,29 @@ void GeometryReader::run() {
throw QString("unsupported format");
}
QMetaObject::invokeMethod(resource.data(), "setGeometryDefinition",
Q_ARG(void*, fbxGeometry));
// Ensure the resource has not been deleted, and won't be while invokeMethod is in flight.
auto resource = _resource.toStrongRef();
if (!resource) {
qCWarning(modelnetworking) << "Abandoning load of" << _url << "; could not get strong ref";
delete fbxGeometry;
} else {
QMetaObject::invokeMethod(resource.data(), "setGeometryDefinition", Qt::BlockingQueuedConnection, Q_ARG(void*, fbxGeometry));
}
} else {
throw QString("url is invalid");
}
} catch (const QString& error) {
qCDebug(modelnetworking) << "Error reading " << _url << ": " << error;
QMetaObject::invokeMethod(resource.data(), "finishedLoading", Q_ARG(bool, false));
}
QThread::currentThread()->setPriority(originalPriority);
qCDebug(modelnetworking) << "Error reading " << _url << ": " << error;
auto resource = _resource.toStrongRef();
// Ensure the resource has not been deleted, and won't be while invokeMethod is in flight.
if (!resource) {
qCWarning(modelnetworking) << "Abandoning load of" << _url << "; could not get strong ref";
} else {
QMetaObject::invokeMethod(resource.data(), "finishedLoading", Qt::BlockingQueuedConnection, Q_ARG(bool, false));
}
}
}
class GeometryDefinitionResource : public GeometryResource {
@ -232,7 +246,7 @@ std::shared_ptr<NetworkGeometry> ModelCache::getGeometry(const QUrl& url, const
GeometryExtra geometryExtra = { mapping, textureBaseUrl };
GeometryResource::Pointer resource = getResource(url, QUrl(), true, &geometryExtra).staticCast<GeometryResource>();
if (resource) {
if (resource->isLoaded() && !resource->hasTextures()) {
if (resource->isLoaded() && resource->shouldSetTextures()) {
resource->setTextures();
}
return std::make_shared<NetworkGeometry>(resource);
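The GeometryReader changes above follow one pattern throughout: the worker holds only a QWeakPointer to the requesting resource and promotes it to a strong reference immediately before the blocking cross-thread delivery, so the receiver cannot be deleted while the slot runs. A minimal sketch with a hypothetical helper name, assuming the surrounding ModelCache/FBXReader includes:

```cpp
#include <QtCore/QMetaObject>
#include <QtCore/QSharedPointer>

void deliverGeometry(const QWeakPointer<Resource>& weakResource, FBXGeometry* fbxGeometry) {
    auto resource = weakResource.toStrongRef();
    if (!resource) {
        // Receiver is gone; the worker owns the result, so clean it up.
        delete fbxGeometry;
        return;
    }
    // BlockingQueuedConnection returns only after setGeometryDefinition has run
    // on the resource's thread, so the strong ref outlives the call.
    QMetaObject::invokeMethod(resource.data(), "setGeometryDefinition",
                              Qt::BlockingQueuedConnection, Q_ARG(void*, fbxGeometry));
}
```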

View file

@ -107,7 +107,8 @@ protected:
friend class GeometryMappingResource;
// Geometries may not hold onto textures while cached - that is for the texture cache
bool hasTextures() const { return !_materials.empty(); }
// Instead, these methods clear and reset textures from the geometry when caching/loading
bool shouldSetTextures() const { return _geometry && _materials.empty(); }
void setTextures();
void resetTextures();

View file

@ -13,18 +13,22 @@
#include <mutex>
#include <glm/glm.hpp>
#include <glm/gtc/random.hpp>
#include <QNetworkReply>
#include <QPainter>
#include <QRunnable>
#include <QThreadPool>
#include <qimagereader.h>
#include <PathUtils.h>
#include <QImageReader>
#include <glm/glm.hpp>
#include <glm/gtc/random.hpp>
#include <gpu/Batch.h>
#include <shared/NsightHelpers.h>
#include <Finally.h>
#include <PathUtils.h>
#include "ModelNetworkingLogging.h"
TextureCache::TextureCache() {
@ -196,7 +200,7 @@ NetworkTexture::NetworkTexture(const QUrl& url, const TextureLoaderFunc& texture
{
_textureLoader = textureLoader;
}
NetworkTexture::TextureLoaderFunc NetworkTexture::getTextureLoader() const {
switch (_type) {
case CUBE_TEXTURE: {
@ -240,14 +244,14 @@ NetworkTexture::TextureLoaderFunc NetworkTexture::getTextureLoader() const {
class ImageReader : public QRunnable {
public:
ImageReader(const QWeakPointer<Resource>& texture, const QByteArray& data, const QUrl& url = QUrl());
ImageReader(const QWeakPointer<Resource>& resource, const QByteArray& data, const QUrl& url = QUrl());
virtual void run();
private:
static void listSupportedImageFormats();
QWeakPointer<Resource> _texture;
QWeakPointer<Resource> _resource;
QUrl _url;
QByteArray _content;
};
@ -261,9 +265,9 @@ void NetworkTexture::loadContent(const QByteArray& content) {
QThreadPool::globalInstance()->start(new ImageReader(_self, content, _url));
}
ImageReader::ImageReader(const QWeakPointer<Resource>& texture, const QByteArray& data,
ImageReader::ImageReader(const QWeakPointer<Resource>& resource, const QByteArray& data,
const QUrl& url) :
_texture(texture),
_resource(resource),
_url(url),
_content(data)
{
@ -278,31 +282,34 @@ void ImageReader::listSupportedImageFormats() {
}
void ImageReader::run() {
PROFILE_RANGE_EX(__FUNCTION__, 0xffff0000, nullptr);
auto originalPriority = QThread::currentThread()->priority();
if (originalPriority == QThread::InheritPriority) {
originalPriority = QThread::NormalPriority;
}
QThread::currentThread()->setPriority(QThread::LowPriority);
Finally restorePriority([originalPriority]{
QThread::currentThread()->setPriority(originalPriority);
});
auto texture = _texture.toStrongRef();
if (!texture) {
qCWarning(modelnetworking) << "Could not get strong ref";
if (!_resource.data()) {
qCWarning(modelnetworking) << "Abandoning load of" << _url << "; could not get strong ref";
return;
}
listSupportedImageFormats();
// try to help the QImage loader by extracting the image file format from the url filename ext
// Some tga are not created properly for example without it
// Help the QImage loader by extracting the image file format from the url filename ext.
// Some tga are not created properly without it.
auto filename = _url.fileName().toStdString();
auto filenameExtension = filename.substr(filename.find_last_of('.') + 1);
QImage image = QImage::fromData(_content, filenameExtension.c_str());
// Note that QImage.format is the pixel format which is different from the "format" of the image file...
auto imageFormat = image.format();
auto imageFormat = image.format();
int originalWidth = image.width();
int originalHeight = image.height();
if (originalWidth == 0 || originalHeight == 0 || imageFormat == QImage::Format_Invalid) {
if (filenameExtension.empty()) {
qCDebug(modelnetworking) << "QImage failed to create from content, no file extension:" << _url;
@ -312,25 +319,40 @@ void ImageReader::run() {
return;
}
gpu::Texture* theTexture = nullptr;
auto ntex = texture.dynamicCast<NetworkTexture>();
if (ntex) {
theTexture = ntex->getTextureLoader()(image, _url.toString().toStdString());
gpu::Texture* texture = nullptr;
{
// Double-check the resource still exists between long operations.
auto resource = _resource.toStrongRef();
if (!resource) {
qCWarning(modelnetworking) << "Abandoning load of" << _url << "; could not get strong ref";
return;
}
auto url = _url.toString().toStdString();
PROFILE_RANGE_EX(__FUNCTION__"::textureLoader", 0xffffff00, nullptr);
texture = resource.dynamicCast<NetworkTexture>()->getTextureLoader()(image, url);
}
QMetaObject::invokeMethod(texture.data(), "setImage",
Q_ARG(void*, theTexture),
Q_ARG(int, originalWidth), Q_ARG(int, originalHeight));
QThread::currentThread()->setPriority(originalPriority);
// Ensure the resource has not been deleted, and won't be while invokeMethod is in flight.
auto resource = _resource.toStrongRef();
if (!resource) {
qCWarning(modelnetworking) << "Abandoning load of" << _url << "; could not get strong ref";
delete texture;
} else {
QMetaObject::invokeMethod(resource.data(), "setImage", Qt::BlockingQueuedConnection,
Q_ARG(void*, texture),
Q_ARG(int, originalWidth), Q_ARG(int, originalHeight));
}
}
void NetworkTexture::setImage(void* voidTexture, int originalWidth,
int originalHeight) {
_originalWidth = originalWidth;
_originalHeight = originalHeight;
gpu::Texture* texture = static_cast<gpu::Texture*>(voidTexture);
// Passing ownership
_textureSource->resetTexture(texture);
auto gpuTexture = _textureSource->getGPUTexture();
@ -338,7 +360,7 @@ void NetworkTexture::setImage(void* voidTexture, int originalWidth,
if (gpuTexture) {
_width = gpuTexture->getWidth();
_height = gpuTexture->getHeight();
setBytes(gpuTexture->getStoredSize());
setSize(gpuTexture->getStoredSize());
} else {
// FIXME: If !gpuTexture, we failed to load!
_width = _height = 0;
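Both GeometryReader::run and ImageReader::run above drop the worker thread to low priority and rely on a scope guard (Finally) to restore it on every exit path. A self-contained sketch of that idiom; ScopeGuard is a hypothetical stand-in for the library's Finally helper:

```cpp
#include <functional>
#include <QtCore/QThread>

class ScopeGuard {
public:
    explicit ScopeGuard(std::function<void()> onExit) : _onExit(std::move(onExit)) {}
    ~ScopeGuard() { _onExit(); }
private:
    std::function<void()> _onExit;
};

void runAtLowPriority(const std::function<void()>& work) {
    auto originalPriority = QThread::currentThread()->priority();
    if (originalPriority == QThread::InheritPriority) {
        originalPriority = QThread::NormalPriority;
    }
    QThread::currentThread()->setPriority(QThread::LowPriority);
    ScopeGuard restorePriority([originalPriority] {
        QThread::currentThread()->setPriority(originalPriority);
    });
    work(); // the original priority is restored on every return path
}
```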

View file

@ -35,6 +35,8 @@ public:
void setCubemap(const gpu::TexturePointer& cubemap);
const gpu::TexturePointer& getCubemap() const { return _cubemap; }
virtual void clear() { setCubemap(nullptr); }
void prepare(gpu::Batch& batch, int textureSlot = SKYBOX_SKYMAP_SLOT, int bufferSlot = SKYBOX_CONSTANTS_SLOT) const;
virtual void render(gpu::Batch& batch, const ViewFrustum& frustum) const;

View file

@ -336,6 +336,7 @@ void NodeList::sendDomainServerCheckIn() {
if (_numNoReplyDomainCheckIns >= MAX_SILENT_DOMAIN_SERVER_CHECK_INS) {
// we haven't heard back from DS in MAX_SILENT_DOMAIN_SERVER_CHECK_INS
// so emit our signal that says that
qDebug() << "Limit of silent domain checkins reached";
emit limitOfSilentDomainCheckInsReached();
}

View file

@ -38,6 +38,7 @@ ResourceCache::~ResourceCache() {
void ResourceCache::refreshAll() {
// Clear all unused resources so we don't have to reload them
clearUnusedResource();
resetResourceCounters();
// Refresh all remaining resources in use
foreach (auto resource, _resources) {
@ -53,9 +54,27 @@ void ResourceCache::refresh(const QUrl& url) {
resource->refresh();
} else {
_resources.remove(url);
resetResourceCounters();
}
}
QVariantList ResourceCache::getResourceList() {
QVariantList list;
if (QThread::currentThread() != thread()) {
// NOTE: invokeMethod does not allow a const QObject*
QMetaObject::invokeMethod(this, "getResourceList", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(QVariantList, list));
} else {
auto resources = _resources.uniqueKeys();
list.reserve(resources.size());
for (auto& resource : resources) {
list << resource;
}
}
return list;
}
void ResourceCache::setRequestLimit(int limit) {
_requestLimit = limit;
@ -114,6 +133,7 @@ QSharedPointer<Resource> ResourceCache::getResource(const QUrl& url, const QUrl&
void ResourceCache::setUnusedResourceCacheSize(qint64 unusedResourcesMaxSize) {
_unusedResourcesMaxSize = clamp(unusedResourcesMaxSize, MIN_UNUSED_MAX_SIZE, MAX_UNUSED_MAX_SIZE);
reserveUnusedResource(0);
resetResourceCounters();
}
void ResourceCache::addUnusedResource(const QSharedPointer<Resource>& resource) {
@ -127,6 +147,8 @@ void ResourceCache::addUnusedResource(const QSharedPointer<Resource>& resource)
resource->setLRUKey(++_lastLRUKey);
_unusedResources.insert(resource->getLRUKey(), resource);
_unusedResourcesSize += resource->getBytes();
resetResourceCounters();
}
void ResourceCache::removeUnusedResource(const QSharedPointer<Resource>& resource) {
@ -134,6 +156,7 @@ void ResourceCache::removeUnusedResource(const QSharedPointer<Resource>& resourc
_unusedResources.remove(resource->getLRUKey());
_unusedResourcesSize -= resource->getBytes();
}
resetResourceCounters();
}
void ResourceCache::reserveUnusedResource(qint64 resourceSize) {
@ -142,8 +165,13 @@ void ResourceCache::reserveUnusedResource(qint64 resourceSize) {
// unload the oldest resource
QMap<int, QSharedPointer<Resource> >::iterator it = _unusedResources.begin();
_unusedResourcesSize -= it.value()->getBytes();
it.value()->setCache(nullptr);
auto size = it.value()->getBytes();
_totalResourcesSize -= size;
_resources.remove(it.value()->getURL());
_unusedResourcesSize -= size;
_unusedResources.erase(it);
}
}
@ -159,6 +187,17 @@ void ResourceCache::clearUnusedResource() {
}
}
void ResourceCache::resetResourceCounters() {
_numTotalResources = _resources.size();
_numUnusedResources = _unusedResources.size();
emit dirty();
}
void ResourceCache::updateTotalSize(const qint64& oldSize, const qint64& newSize) {
_totalResourcesSize += (newSize - oldSize);
emit dirty();
}
void ResourceCacheSharedItems::appendActiveRequest(Resource* resource) {
Lock lock(_mutex);
_loadingRequests.append(resource);
@ -341,7 +380,7 @@ void Resource::allReferencesCleared() {
_cache->addUnusedResource(self);
} else {
delete this;
deleteLater();
}
}
@ -377,6 +416,11 @@ void Resource::finishedLoading(bool success) {
emit finished(success);
}
void Resource::setSize(const qint64& bytes) {
QMetaObject::invokeMethod(_cache.data(), "updateTotalSize", Q_ARG(qint64, _bytes), Q_ARG(qint64, bytes));
_bytes = bytes;
}
void Resource::reinsert() {
_cache->_resources.insert(_url, _self);
}
@ -412,7 +456,7 @@ void Resource::handleDownloadProgress(uint64_t bytesReceived, uint64_t bytesTota
void Resource::handleReplyFinished() {
Q_ASSERT_X(_request, "Resource::handleReplyFinished", "Request should not be null while in handleReplyFinished");
_bytes = _bytesTotal;
setSize(_bytesTotal);
if (!_request || _request != sender()) {
// This can happen in the edge case that a request is timed out, but a `finished` signal is emitted before it is deleted.

View file

@ -12,7 +12,9 @@
#ifndef hifi_ResourceCache_h
#define hifi_ResourceCache_h
#include <atomic>
#include <mutex>
#include <QtCore/QHash>
#include <QtCore/QList>
#include <QtCore/QObject>
@ -29,6 +31,8 @@
#include "ResourceManager.h"
Q_DECLARE_METATYPE(size_t)
class QNetworkReply;
class QTimer;
@ -79,8 +83,20 @@ private:
/// Base class for resource caches.
class ResourceCache : public QObject {
Q_OBJECT
Q_PROPERTY(size_t numTotal READ getNumTotalResources NOTIFY dirty)
Q_PROPERTY(size_t numCached READ getNumCachedResources NOTIFY dirty)
Q_PROPERTY(size_t sizeTotal READ getSizeTotalResources NOTIFY dirty)
Q_PROPERTY(size_t sizeCached READ getSizeCachedResources NOTIFY dirty)
public:
size_t getNumTotalResources() const { return _numTotalResources; }
size_t getSizeTotalResources() const { return _totalResourcesSize; }
size_t getNumCachedResources() const { return _numUnusedResources; }
size_t getSizeCachedResources() const { return _unusedResourcesSize; }
Q_INVOKABLE QVariantList getResourceList();
static void setRequestLimit(int limit);
static int getRequestLimit() { return _requestLimit; }
@ -101,15 +117,21 @@ public:
void refreshAll();
void refresh(const QUrl& url);
signals:
void dirty();
public slots:
void checkAsynchronousGets();
protected slots:
void updateTotalSize(const qint64& oldSize, const qint64& newSize);
protected:
/// Loads a resource from the specified URL.
/// \param fallback a fallback URL to load if the desired one is unavailable
/// \param delayLoad if true, don't load the resource immediately; wait until load is first requested
/// \param extra extra data to pass to the creator, if appropriate
Q_INVOKABLE QSharedPointer<Resource> getResource(const QUrl& url, const QUrl& fallback = QUrl(),
QSharedPointer<Resource> getResource(const QUrl& url, const QUrl& fallback = QUrl(),
bool delayLoad = false, void* extra = NULL);
/// Creates a new resource.
@ -118,18 +140,20 @@ protected:
void addUnusedResource(const QSharedPointer<Resource>& resource);
void removeUnusedResource(const QSharedPointer<Resource>& resource);
void reserveUnusedResource(qint64 resourceSize);
void clearUnusedResource();
/// Attempt to load a resource if requests are below the limit, otherwise queue the resource for loading
/// \return true if the resource began loading, otherwise false if the resource is in the pending queue
Q_INVOKABLE static bool attemptRequest(Resource* resource);
static bool attemptRequest(Resource* resource);
static void requestCompleted(Resource* resource);
static bool attemptHighestPriorityRequest();
private:
friend class Resource;
void reserveUnusedResource(qint64 resourceSize);
void clearUnusedResource();
void resetResourceCounters();
QHash<QUrl, QWeakPointer<Resource>> _resources;
int _lastLRUKey = 0;
@ -140,8 +164,13 @@ private:
QReadWriteLock _resourcesToBeGottenLock;
QQueue<QUrl> _resourcesToBeGotten;
std::atomic<size_t> _numTotalResources { 0 };
std::atomic<size_t> _numUnusedResources { 0 };
std::atomic<qint64> _totalResourcesSize { 0 };
std::atomic<qint64> _unusedResourcesSize { 0 };
qint64 _unusedResourcesMaxSize = DEFAULT_UNUSED_MAX_SIZE;
qint64 _unusedResourcesSize = 0;
QMap<int, QSharedPointer<Resource>> _unusedResources;
};
@ -226,7 +255,7 @@ protected:
virtual void downloadFinished(const QByteArray& data) { finishedLoading(true); }
/// Called when the download is finished and processed, sets the number of actual bytes.
void setBytes(qint64 bytes) { _bytes = bytes; }
void setSize(const qint64& bytes);
/// Called when the download is finished and processed.
/// This should be called by subclasses that override downloadFinished to mark the end of processing.
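The new counters are exposed as Q_PROPERTYs with a dirty() notify signal, so scripts or debug UI can observe cache pressure. A hypothetical C++ observer (not in the diff) showing how they might be consumed:

```cpp
#include <QtCore/QDebug>
#include <QtCore/QObject>

void watchCacheStats(ResourceCache* cache) {
    QObject::connect(cache, &ResourceCache::dirty, cache, [cache] {
        qDebug() << "resources:" << cache->getNumTotalResources()
                 << "unused (cached):" << cache->getNumCachedResources()
                 << "total bytes:" << cache->getSizeTotalResources()
                 << "cached bytes:" << cache->getSizeCachedResources();
    });
}
```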

View file

@ -384,9 +384,10 @@ int Octree::readElementData(OctreeElementPointer destinationElement, const unsig
// check the exists mask to see if we have a child to traverse into
if (oneAtBit(childInBufferMask, childIndex)) {
if (!destinationElement->getChildAtIndex(childIndex)) {
auto childAt = destinationElement->getChildAtIndex(childIndex);
if (!childAt) {
// add a child at that index, if it doesn't exist
destinationElement->addChildAtIndex(childIndex);
childAt = destinationElement->addChildAtIndex(childIndex);
bool nodeIsDirty = destinationElement->isDirty();
if (nodeIsDirty) {
_isDirty = true;
@ -394,8 +395,7 @@ int Octree::readElementData(OctreeElementPointer destinationElement, const unsig
}
// tell the child to read the subsequent data
int lowerLevelBytes = readElementData(destinationElement->getChildAtIndex(childIndex),
nodeData + bytesRead, bytesLeftToRead, args);
int lowerLevelBytes = readElementData(childAt, nodeData + bytesRead, bytesLeftToRead, args);
bytesRead += lowerLevelBytes;
bytesLeftToRead -= lowerLevelBytes;

View file

@ -161,7 +161,12 @@ PhysicsMotionType EntityMotionState::computePhysicsMotionType() const {
}
return MOTION_TYPE_DYNAMIC;
}
return (_entity->isMovingRelativeToParent() || _entity->hasActions()) ? MOTION_TYPE_KINEMATIC : MOTION_TYPE_STATIC;
if (_entity->isMovingRelativeToParent() ||
_entity->hasActions() ||
_entity->hasAncestorOfType(NestableType::Avatar)) {
return MOTION_TYPE_KINEMATIC;
}
return MOTION_TYPE_STATIC;
}
bool EntityMotionState::isMoving() const {
@ -202,8 +207,16 @@ void EntityMotionState::setWorldTransform(const btTransform& worldTrans) {
assert(entityTreeIsLocked());
measureBodyAcceleration();
_entity->setPosition(bulletToGLM(worldTrans.getOrigin()) + ObjectMotionState::getWorldOffset());
_entity->setRotation(bulletToGLM(worldTrans.getRotation()));
bool positionSuccess;
_entity->setPosition(bulletToGLM(worldTrans.getOrigin()) + ObjectMotionState::getWorldOffset(), positionSuccess, false);
if (!positionSuccess) {
qDebug() << "EntityMotionState::setWorldTransform setPosition failed" << _entity->getID();
}
bool orientationSuccess;
_entity->setOrientation(bulletToGLM(worldTrans.getRotation()), orientationSuccess, false);
if (!orientationSuccess) {
qDebug() << "EntityMotionState::setWorldTransform setOrientation failed" << _entity->getID();
}
_entity->setVelocity(getBodyLinearVelocity());
_entity->setAngularVelocity(getBodyAngularVelocity());
_entity->setLastSimulated(usecTimestampNow());

View file

@ -96,6 +96,7 @@ bool Procedural::parseVersion(const QJsonValue& version) {
bool Procedural::parseUrl(const QUrl& shaderUrl) {
if (!shaderUrl.isValid()) {
qWarning() << "Invalid shader URL: " << shaderUrl;
_networkShader.reset();
return false;
}
@ -110,6 +111,7 @@ bool Procedural::parseUrl(const QUrl& shaderUrl) {
_shaderPath = _shaderUrl.toLocalFile();
qDebug() << "Shader path: " << _shaderPath;
if (!QFile(_shaderPath).exists()) {
_networkShader.reset();
return false;
}
} else {
@ -135,9 +137,14 @@ bool Procedural::parseTextures(const QJsonArray& channels) {
auto textureCache = DependencyManager::get<TextureCache>();
size_t channelCount = std::min(MAX_PROCEDURAL_TEXTURE_CHANNELS, (size_t)_parsedChannels.size());
for (size_t i = 0; i < channelCount; ++i) {
QString url = _parsedChannels.at((int)i).toString();
_channels[i] = textureCache->getTexture(QUrl(url));
size_t channel = 0;
for (; channel < channelCount; ++channel) {
QString url = _parsedChannels.at((int)channel).toString();
_channels[channel] = textureCache->getTexture(QUrl(url));
}
for (; channel < MAX_PROCEDURAL_TEXTURE_CHANNELS; ++channel) {
// Release those textures no longer in use
_channels[channel] = textureCache->getTexture(QUrl());
}
_channelsDirty = true;
@ -149,20 +156,21 @@ bool Procedural::parseTextures(const QJsonArray& channels) {
void Procedural::parse(const QJsonObject& proceduralData) {
_enabled = false;
if (proceduralData.isEmpty()) {
return;
}
auto version = proceduralData[VERSION_KEY];
auto shaderUrl = proceduralData[URL_KEY].toString();
shaderUrl = ResourceManager::normalizeURL(shaderUrl);
auto uniforms = proceduralData[UNIFORMS_KEY].toObject();
auto channels = proceduralData[CHANNELS_KEY].toArray();
if (parseVersion(version) &&
parseUrl(shaderUrl) &&
parseUniforms(uniforms) &&
parseTextures(channels)) {
bool isValid = true;
// Run through parsing regardless of validity to clear old cached resources
isValid = parseVersion(version) && isValid;
isValid = parseUrl(shaderUrl) && isValid;
isValid = parseUniforms(uniforms) && isValid;
isValid = parseTextures(channels) && isValid;
if (!proceduralData.isEmpty() && isValid) {
_enabled = true;
}
}

View file

@ -25,6 +25,14 @@ ProceduralSkybox::ProceduralSkybox() : model::Skybox() {
_procedural._state->setStencilTest(true, 0xFF, gpu::State::StencilTest(0, 0xFF, gpu::EQUAL, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP));
}
void ProceduralSkybox::clear() {
// Parse and prepare a procedural with no shaders to release textures
parse(QString());
_procedural.ready();
Skybox::clear();
}
void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& frustum) const {
if (_procedural.ready()) {
ProceduralSkybox::render(batch, frustum, (*this));

View file

@ -24,6 +24,8 @@ public:
void parse(const QString& userData) { _procedural.parse(userData); }
virtual void clear() override;
virtual void render(gpu::Batch& batch, const ViewFrustum& frustum) const;
static void render(gpu::Batch& batch, const ViewFrustum& frustum, const ProceduralSkybox& skybox);

View file

@ -159,11 +159,13 @@ void Model::updateRenderItems() {
Transform collisionMeshOffset;
collisionMeshOffset.postTranslate(self->_offset);
uint32_t deleteGeometryCounter = self->_deleteGeometryCounter;
render::PendingChanges pendingChanges;
foreach (auto itemID, self->_modelMeshRenderItems.keys()) {
pendingChanges.updateItem<ModelMeshPartPayload>(itemID, [modelTransform, modelMeshOffset](ModelMeshPartPayload& data) {
pendingChanges.updateItem<ModelMeshPartPayload>(itemID, [modelTransform, modelMeshOffset, deleteGeometryCounter](ModelMeshPartPayload& data) {
// Ensure the model geometry was not reset between frames
if (data._model->isLoaded()) {
if (data._model && data._model->isLoaded() && deleteGeometryCounter == data._model->_deleteGeometryCounter) {
// lazy update of cluster matrices used for rendering. We need to update them here, so we can correctly update the bounding box.
data._model->updateClusterMatrices(modelTransform.getTranslation(), modelTransform.getRotation());
@ -1146,6 +1148,7 @@ void Model::setBlendedVertices(int blendNumber, const std::weak_ptr<NetworkGeome
}
void Model::deleteGeometry() {
_deleteGeometryCounter++;
_blendedVertexBuffers.clear();
_meshStates.clear();
_rig->destroyAnimGraph();

View file

@ -394,6 +394,8 @@ protected:
friend class ModelMeshPartPayload;
RigPointer _rig;
uint32_t _deleteGeometryCounter { 0 };
};
Q_DECLARE_METATYPE(ModelPointer)

View file

@ -0,0 +1,52 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
// sdf_text.frag
// fragment shader
//
// Created by Bradley Austin Davis on 2015-02-04
// Based on fragment shader code from
// https://github.com/paulhoux/Cinder-Samples/blob/master/TextRendering/include/text/Text.cpp
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
uniform sampler2D Font;
uniform bool Outline;
uniform vec4 Color;
// the interpolated normal
in vec3 _normal;
in vec2 _texCoord0;
layout(location = 0) out vec4 _fragColor0;
const float gamma = 2.2;
const float smoothing = 32.0;
const float interiorCutoff = 0.8;
const float outlineExpansion = 0.2;
void main() {
// retrieve signed distance
float sdf = texture(Font, _texCoord0).g;
if (Outline) {
if (sdf > interiorCutoff) {
sdf = 1.0 - sdf;
} else {
sdf += outlineExpansion;
}
}
// perform adaptive anti-aliasing of the edges
// The larger we're rendering, the less anti-aliasing we need
float s = smoothing * length(fwidth(_texCoord0));
float w = clamp( s, 0.0, 0.5);
float a = smoothstep(0.5 - w, 0.5 + w, sdf);
// gamma correction for linear attenuation
a = pow(a, 1.0 / gamma);
// discard if invisible
if (a < 0.01) {
discard;
}
_fragColor0 = vec4(Color.rgb, a);
}
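Restated in math (a direct transcription of the shader above, no new behavior): with sampled signed distance $d$ and texture coordinate $uv$,

$$ s = 32\,\lVert \operatorname{fwidth}(uv) \rVert, \qquad w = \operatorname{clamp}(s,\, 0,\, 0.5), \qquad \alpha = \bigl(\operatorname{smoothstep}(0.5 - w,\; 0.5 + w,\; d)\bigr)^{1/2.2}, $$

and fragments with $\alpha < 0.01$ are discarded.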

View file

@ -9,6 +9,7 @@
#include "sdf_text3D_vert.h"
#include "sdf_text3D_frag.h"
#include "sdf_text3D_overlay_frag.h"
#include "../RenderUtilsLogging.h"
#include "FontFamilies.h"
@ -220,10 +221,13 @@ void Font::setupGPU() {
{
auto vertexShader = gpu::Shader::createVertex(std::string(sdf_text3D_vert));
auto pixelShader = gpu::Shader::createPixel(std::string(sdf_text3D_frag));
auto pixelShaderOverlay = gpu::Shader::createPixel(std::string(sdf_text3D_overlay_frag));
gpu::ShaderPointer program = gpu::Shader::createProgram(vertexShader, pixelShader);
gpu::ShaderPointer programOverlay = gpu::Shader::createProgram(vertexShader, pixelShaderOverlay);
gpu::Shader::BindingSet slotBindings;
gpu::Shader::makeProgram(*program, slotBindings);
gpu::Shader::makeProgram(*programOverlay, slotBindings);
_fontLoc = program->getTextures().findLocation("Font");
_outlineLoc = program->getUniforms().findLocation("Outline");
@ -237,9 +241,10 @@ void Font::setupGPU() {
gpu::State::FACTOR_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ONE);
_pipeline = gpu::Pipeline::create(program, state);
auto layeredState = std::make_shared<gpu::State>(state->getValues());
layeredState->setDepthTest(false);
_layeredPipeline = gpu::Pipeline::create(program, layeredState);
auto layeredState = std::make_shared<gpu::State>();
layeredState->setCullMode(gpu::State::CULL_BACK);
layeredState->setDepthTest(true, true, gpu::LESS_EQUAL);
_layeredPipeline = gpu::Pipeline::create(programOverlay, layeredState);
}
// Sanity checks

View file

@ -17,7 +17,6 @@
void registerAudioMetaTypes(QScriptEngine* engine) {
qScriptRegisterMetaType(engine, injectorOptionsToScriptValue, injectorOptionsFromScriptValue);
qScriptRegisterMetaType(engine, soundSharedPointerToScriptValue, soundSharedPointerFromScriptValue);
qScriptRegisterMetaType(engine, soundPointerToScriptValue, soundPointerFromScriptValue);
}
AudioScriptingInterface& AudioScriptingInterface::getInstance() {
@ -31,13 +30,14 @@ AudioScriptingInterface::AudioScriptingInterface() :
}
ScriptAudioInjector* AudioScriptingInterface::playSound(Sound* sound, const AudioInjectorOptions& injectorOptions) {
ScriptAudioInjector* AudioScriptingInterface::playSound(SharedSoundPointer sound, const AudioInjectorOptions& injectorOptions) {
if (QThread::currentThread() != thread()) {
ScriptAudioInjector* injector = NULL;
QMetaObject::invokeMethod(this, "playSound", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(ScriptAudioInjector*, injector),
Q_ARG(Sound*, sound), Q_ARG(const AudioInjectorOptions&, injectorOptions));
Q_ARG(SharedSoundPointer, sound),
Q_ARG(const AudioInjectorOptions&, injectorOptions));
return injector;
}

View file

@ -27,7 +27,7 @@ public:
protected:
// this method is protected to stop C++ callers from calling, but invokable from script
Q_INVOKABLE ScriptAudioInjector* playSound(Sound* sound, const AudioInjectorOptions& injectorOptions = AudioInjectorOptions());
Q_INVOKABLE ScriptAudioInjector* playSound(SharedSoundPointer sound, const AudioInjectorOptions& injectorOptions = AudioInjectorOptions());
Q_INVOKABLE void setStereoInput(bool stereo);
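For the Qt::BlockingQueuedConnection invocation above to marshal the argument across threads, SharedSoundPointer must be a registered metatype. A minimal, illustrative registration sketch (the library is assumed to already declare the metatype; only the runtime registration is shown):

```cpp
#include <QtCore/QMetaType>

static void registerSharedSoundPointerSketch() {
    // Makes SharedSoundPointer usable in Q_ARG / Q_RETURN_ARG across threads.
    qRegisterMetaType<SharedSoundPointer>("SharedSoundPointer");
}
```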

Some files were not shown because too many files have changed in this diff.