Merge branch 'master' of https://github.com/highfidelity/hifi into graphicsMaster

This commit is contained in:
samcake 2015-12-11 17:49:00 -08:00
commit b3acde4462
40 changed files with 1400 additions and 395 deletions

View file

@ -36,7 +36,12 @@ if (WIN32)
if (MSVC10)
set(WINDOW_SDK_PATH "C:\\Program Files\\Microsoft SDKs\\Windows\\v7.1 ")
elseif (MSVC12)
set(WINDOW_SDK_PATH "C:\\Program Files (x86)\\Windows Kits\\8.1\\Lib\\winv6.3\\um\\x86 ")
if ("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
set(WINDOW_SDK_FOLDER "x64")
else()
set(WINDOW_SDK_FOLDER "x86")
endif()
set(WINDOW_SDK_PATH "C:\\Program Files (x86)\\Windows Kits\\8.1\\Lib\\winv6.3\\um\\${WINDOW_SDK_FOLDER}")
endif ()
message (WINDOW_SDK_PATH= ${WINDOW_SDK_PATH})
set(CMAKE_PREFIX_PATH ${CMAKE_PREFIX_PATH} ${WINDOW_SDK_PATH})

View file

@ -225,6 +225,8 @@ bool OctreeQueryNode::updateCurrentViewFrustum() {
newestViewFrustum.setPosition(getCameraPosition());
newestViewFrustum.setOrientation(getCameraOrientation());
newestViewFrustum.setKeyholeRadius(getKeyholeRadius());
// Also make sure it's got the correct lens details from the camera
float originalFOV = getCameraFov();
float wideFOV = originalFOV + VIEW_FRUSTUM_FOV_OVERSEND;

View file

@ -8,14 +8,14 @@ string(TOUPPER ${EXTERNAL_NAME} EXTERNAL_NAME_UPPER)
#set(SIXENSE_URL_MD5 "10cc8dc470d2ac1244a88cf04bc549cc")
#set(SIXENSE_NEW_LAYOUT 0)
#set(SIXENSE_URL "http://public.s3.amazonaws.com/dependencies/SixenseSDK_071615.zip")
#set(SIXENSE_URL_MD5 "752a3901f334124e9cffc2ba4136ef7d")
#set(SIXENSE_NEW_LAYOUT 1)
set(SIXENSE_URL "http://hifi-public.s3.amazonaws.com/dependencies/SixenseSDK_102215.zip")
set(SIXENSE_URL_MD5 "93c3a6795cce777a0f472b09532935f1")
set(SIXENSE_URL "http://hifi-public.s3.amazonaws.com/dependencies/SixenseSDK_071615.zip")
set(SIXENSE_URL_MD5 "752a3901f334124e9cffc2ba4136ef7d")
set(SIXENSE_NEW_LAYOUT 1)
#set(SIXENSE_URL "http://hifi-public.s3.amazonaws.com/dependencies/SixenseSDK_102215.zip")
#set(SIXENSE_URL_MD5 "93c3a6795cce777a0f472b09532935f1")
#set(SIXENSE_NEW_LAYOUT 1)
ExternalProject_Add(
${EXTERNAL_NAME}
URL ${SIXENSE_URL}

View file

@ -14,7 +14,6 @@
// Goes into "paused" when the '.' key is pressed (and automatically when started in HMD), and back to normal when pressing any key.
// See MAIN CONTROL, below, for what "paused" actually does.
var IK_WINDOW_AFTER_GOING_ACTIVE = 3000; // milliseconds
var OVERLAY_DATA = {
text: "Paused:\npress any key to continue",
font: {size: 75},
@ -31,7 +30,6 @@ function playAwayAnimation() {
return {isAway: true, isNotAway: false, isNotMoving: false, ikOverlayAlpha: 0.0};
}
if (stopper) {
Script.clearTimeout(stopper);
stopper = false;
MyAvatar.removeAnimationStateHandler(activeAnimationHandlerId); // do it now, before making new assignment
}
@ -47,15 +45,14 @@ function stopAwayAnimation() {
// It cannot be as soon as we want to stop the away animation, because then things will look goofy as we come out of that animation.
// (Imagine an away animation that sits or kneels, and then stands back up when coming out of it. If head is at the HMD, then it won't
// want to track the standing up animation.)
// Our standard anim graph flips 'awayOutroOnDone' for one frame, but it's a trigger (not an animVar) and other folks might use different graphs.
// So... Just give us a fixed amount of time to be done with animation, before we turn ik back on.
// The anim graph will trigger awayOutroOnDone when awayOutro is finished.
var backToNormal = false;
stopper = Script.setTimeout(function () {
backToNormal = true;
stopper = false;
}, IK_WINDOW_AFTER_GOING_ACTIVE);
stopper = true;
function animateActive(state) {
if (state.ikOverlayAlpha) {
if (state.awayOutroOnDone) {
backToNormal = true;
stopper = false;
} else if (state.ikOverlayAlpha) {
// Once the right state gets reflected back to us, we don't need the handler any more.
// But we are locked against handler changes during the execution of a handler, so remove asynchronously.
Script.setTimeout(function () { MyAvatar.removeAnimationStateHandler(activeAnimationHandlerId); }, 0);
@ -63,7 +60,7 @@ function stopAwayAnimation() {
// It might be cool to "come back to life" by fading the ik overlay back in over a short time. But let's see how this goes.
return {isAway: false, isNotAway: true, ikOverlayAlpha: backToNormal ? 1.0 : 0.0}; // IWBNI we had a way of deleting an anim var.
}
activeAnimationHandlerId = MyAvatar.addAnimationStateHandler(animateActive, ['isAway', 'isNotAway', 'isNotMoving', 'ikOverlayAlpha']);
activeAnimationHandlerId = MyAvatar.addAnimationStateHandler(animateActive, ['ikOverlayAlpha', 'awayOutroOnDone']);
}
// OVERLAY

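Editor's note: the away.js change above stops waiting out a fixed IK_WINDOW_AFTER_GOING_ACTIVE timeout and instead waits for the anim graph to raise the 'awayOutroOnDone' trigger, which the AnimVariantMap change later in this commit reflects back to scripts as a one-frame true value. A minimal sketch of that pattern, assuming the Interface scripting API used in the diff (MyAvatar.addAnimationStateHandler / removeAnimationStateHandler, Script.setTimeout); everything not shown in the diff is illustrative:

// Sketch: keep IK suppressed until the anim graph reports the away outro finished.
var handlerId = null;
function waitForOutro(state) {
    // 'awayOutroOnDone' is a trigger: it shows up as true for a single frame.
    if (state.awayOutroOnDone) {
        // We are locked against handler changes while a handler runs, so remove asynchronously.
        Script.setTimeout(function () { MyAvatar.removeAnimationStateHandler(handlerId); }, 0);
        return { ikOverlayAlpha: 1.0 }; // turn IK back on
    }
    return { ikOverlayAlpha: 0.0 };     // still in the outro; keep IK off
}
handlerId = MyAvatar.addAnimationStateHandler(waitForOutro, ['ikOverlayAlpha', 'awayOutroOnDone']);
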
View file

@ -0,0 +1,55 @@
//
// createAvatarDetector.js
//
// Created by James B. Pollack @imgntn on 12/7/2015
// Copyright 2015 High Fidelity, Inc.
//
// Run this script if you want the rats to run away from you.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
var avatarDetector = null;
function createAvatarDetector() {
var detectorProperties = {
name: 'Hifi-Avatar-Detector',
type: 'Box',
position: MyAvatar.position,
dimensions: {
x: 1,
y: 2,
z: 1
},
collisionsWillMove: false,
ignoreForCollisions: true,
visible: false,
color: {
red: 255,
green: 0,
blue: 0
}
}
avatarDetector = Entities.addEntity(detectorProperties);
};
var updateAvatarDetector = function() {
// print('updating detector position' + JSON.stringify(MyAvatar.position))
Entities.editEntity(avatarDetector, {
position: MyAvatar.position
});
};
var cleanup = function() {
Script.update.disconnect(updateAvatarDetector);
Entities.deleteEntity(avatarDetector);
}
createAvatarDetector();
Script.scriptEnding.connect(cleanup);
Script.update.connect(updateAvatarDetector);

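Editor's note: the script above keeps an invisible, collisionless box named 'Hifi-Avatar-Detector' glued to MyAvatar.position so that other scripts can notice avatars simply by searching nearby entities by name (this is what ratSteer.js does further down). A small sketch of the consuming side, assuming the same Entities API shown in this diff; the 3-meter search radius is illustrative:

// Sketch: is any avatar running createAvatarDetector.js close to this position?
function avatarDetectorNearby(position) {
    var nearby = Entities.findEntities(position, 3); // search radius in meters (illustrative)
    for (var i = 0; i < nearby.length; i++) {
        var props = Entities.getEntityProperties(nearby[i]);
        if (props.name === 'Hifi-Avatar-Detector') {
            return true;
        }
    }
    return false;
}
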
View file

@ -0,0 +1,471 @@
//
// ratCreator.js
//
// Created by James B. Pollack @imgntn on 12/7/2015
// Copyright 2015 High Fidelity, Inc.
//
// This script spawns some rats that have simple steering behaviors applied to them.
// Run it in the 'drylake' environment, or adjust all object locations to match your scene.
//
// Steering behaviors from ratSteer.js:
// The rats will move from a spawning point toward their nest.
// They will avoid avoider blocks moving across the alley.
// They will avoid avatars running createAvatarDetector.js.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
Script.include('ratSteer.js');
var steer = loadSteer();
Script.include('../libraries/tween.js');
var TWEEN = loadTween();
var USE_CONSTANT_SPAWNER = true;
var RAT_SPAWNER_LOCATION = {
x: 1000.5,
y: 98,
z: 1040
};
var RAT_NEST_LOCATION = {
x: 1003.5,
y: 99,
z: 964.2
};
var RAT_DIMENSIONS = {
x: 0.22,
y: 0.32,
z: 1.14
};
var RAT_MODEL_URL = 'http://hifi-content.s3.amazonaws.com/james/rat/models/rat_model.fbx';
var RAT_IDLE_ANIMATION_URL = 'http://hifi-content.s3.amazonaws.com/james/rat/animations/idle.fbx';
var RAT_WALKING_ANIMATION_URL = 'http://hifi-content.s3.amazonaws.com/james/rat/animations/walk.fbx';
var RAT_RUNNING_ANIMATION_URL = 'http://hifi-content.s3.amazonaws.com/james/rat/animations/run.fbx';
var RAT_DEATH_ANIMATION_URL = 'http://hifi-content.s3.amazonaws.com/james/rat/animations/death.fbx';
var RAT_IN_NEST_DISTANCE = 4;
//how many milliseconds between rats
var RAT_SPAWN_RATE = 2500;
var RAT_SOUND_URL = 'http://hifi-public.s3.amazonaws.com/sounds/Rats_Squeaks_Active.wav';
var ratRunningSound = SoundCache.getSound(RAT_SOUND_URL);
function playRatRunningAnimation(rat) {
var animationSettings = JSON.stringify({
running: true
});
Entities.editEntity(rat, {
animationURL: RAT_RUNNING_ANIMATION_URL,
animation: {
url: RAT_RUNNING_ANIMATION_URL,
running: true,
fps: 30
},
});
}
function playRatDeathAnimation(rat) {
var animationSettings = JSON.stringify({
running: true
});
Entities.editEntity(rat, {
animationURL: RAT_DEATH_ANIMATION_URL,
animationSettings: animationSettings
});
}
var modelRatProperties = {
name: 'rat',
type: 'Model',
modelURL: RAT_MODEL_URL,
dimensions: RAT_DIMENSIONS,
position: RAT_SPAWNER_LOCATION,
shapeType: 'Box',
damping: 0.8,
angularDamping: 0.99,
friction: 0.75,
collisionsWillMove: true,
ignoreForCollisions: false,
gravity: {
x: 0,
y: -9.8,
z: 0
},
lifetime: 30,
userData: JSON.stringify({
grabbableKey: {
grabbable: false
}
})
};
var targetProperties = {
name: 'Hifi-Rat-Nest',
type: 'Box',
color: {
red: 0,
green: 255,
blue: 0
},
dimensions: {
x: 1,
y: 1,
z: 1
},
visible: false,
position: RAT_NEST_LOCATION
};
var target = Entities.addEntity(targetProperties);
function addRat() {
var rat = Entities.addEntity(modelRatProperties);
return rat;
}
//every sixth rat will play a sound
var RAT_SOUND_RATE = 6;
//spawn rate will be multiplied by this to clear any sounds hanging around
var RAT_SOUND_CLEAR_RATE = 3;
var rats = [];
var metaRats = [];
var ratCount = 0;
var AVOIDER_Y_HEIGHT = 99;
var FIRST_AVOIDER_START_POSITION = {
x: 1004,
y: AVOIDER_Y_HEIGHT,
z: 1019
};
var FIRST_AVOIDER_FINISH_POSITION = {
x: 997,
y: AVOIDER_Y_HEIGHT,
z: 1019
};
var SECOND_AVOIDER_START_POSITION = {
x: 998,
y: AVOIDER_Y_HEIGHT,
z: 998
};
var SECOND_AVOIDER_FINISH_POSITION = {
x: 1005,
y: AVOIDER_Y_HEIGHT,
z: 999
};
var THIRD_AVOIDER_START_POSITION = {
x: 1001.5,
y: 100,
z: 978
};
var THIRD_AVOIDER_FINISH_POSITION = {
x: 1005,
y: 100,
z: 974
};
cleanupLeftoverAvoidersBeforeStart();
var avoiders = [];
addAvoiderBlock(FIRST_AVOIDER_START_POSITION);
addAvoiderBlock(SECOND_AVOIDER_START_POSITION);
addAvoiderBlock(THIRD_AVOIDER_START_POSITION);
function addAvoiderBlock(position) {
var avoiderProperties = {
name: 'Hifi-Rat-Avoider',
type: 'Box',
color: {
red: 255,
green: 0,
blue: 255
},
dimensions: {
x: 1,
y: 1,
z: 1
},
position: position,
collisionsWillMove: false,
ignoreForCollisions: true,
visible: false
};
var avoider = Entities.addEntity(avoiderProperties);
avoiders.push(avoider);
}
tweenAvoider(avoiders[0], FIRST_AVOIDER_START_POSITION, FIRST_AVOIDER_FINISH_POSITION);
tweenAvoider(avoiders[1], SECOND_AVOIDER_START_POSITION, SECOND_AVOIDER_FINISH_POSITION);
tweenAvoider(avoiders[2], THIRD_AVOIDER_START_POSITION, THIRD_AVOIDER_FINISH_POSITION);
function tweenAvoider(entityID, startPosition, endPosition) {
var ANIMATION_DURATION = 4200;
var begin = {
x: startPosition.x,
y: startPosition.y,
z: startPosition.z
};
var end = endPosition;
var original = startPosition;
var tweenHead = new TWEEN.Tween(begin).to(end, ANIMATION_DURATION);
function updateTo() {
Entities.editEntity(entityID, {
position: {
x: begin.x,
y: begin.y,
z: begin.z
}
});
}
function updateBack() {
Entities.editEntity(entityID, {
position: {
x: begin.x,
y: begin.y,
z: begin.z
}
})
}
var tweenBack = new TWEEN.Tween(begin).to(original, ANIMATION_DURATION).onUpdate(updateBack);
tweenHead.onUpdate(function() {
updateTo()
});
tweenHead.chain(tweenBack);
tweenBack.chain(tweenHead);
tweenHead.start();
}
function updateTweens() {
TWEEN.update();
}
function createRatSoundInjector() {
var audioOptions = {
volume: 0.05,
loop: false
};
var injector = Audio.playSound(ratRunningSound, audioOptions);
return injector;
}
function moveRats() {
rats.forEach(function(rat) {
// remove the rat if it's near the nest
checkDistanceFromNest(rat);
//see if there are avatars to run from
var avatarFlightVectors = steer.fleeAllAvatars(rat);
var averageAvatarFlight;
var i;
for (i = 0; i < avatarFlightVectors.length; i++) {
if (i === 0) {
averageAvatarFlight = avatarFlightVectors[0];
} else {
averageAvatarFlight = Vec3.sum(averageAvatarFlight, avatarFlightVectors[i]);
}
}
averageAvatarFlight = Vec3.multiply(averageAvatarFlight, 1 / avatarFlightVectors.length);
//see if there are avoiders to flee
var avoidBlockVectors = steer.fleeAvoiderBlocks(rat);
var averageAvoiderFlight;
var j;
for (j = 0; j < avoidBlockVectors.length; j++) {
if (j === 0) {
averageAvoiderFlight = avoidBlockVectors[0];
} else {
averageAvoiderFlight = Vec3.sum(averageAvoiderFlight, avoidBlockVectors[j]);
}
};
averageAvoiderFlight = Vec3.multiply(averageAvoiderFlight, 1 / avoidBlockVectors.length);
//add all of the vectors and divide them by total to get average vector
//start by trying to go toward the nest
var seek = steer.arrive(rat, target);
var averageVector = seek;
var divisorCount = 1;
//if there are avatars to run away from
if (avatarFlightVectors.length > 0) {
divisorCount++;
averageVector = Vec3.sum(averageVector, averageAvatarFlight);
}
//or if there are avoider blocks to run away from
if (avoidBlockVectors.length > 0) {
divisorCount++;
averageVector = Vec3.sum(averageVector, averageAvoiderFlight);
}
averageVector = Vec3.multiply(averageVector, 1 / divisorCount);
var thisRatProps = Entities.getEntityProperties(rat, ["position", "rotation"]);
var ratPosition = thisRatProps.position;
var ratToNest = Vec3.subtract(RAT_NEST_LOCATION, ratPosition);
var ratRotation = Quat.rotationBetween(Vec3.UNIT_Z, ratToNest);
var eulerAngle = Quat.safeEulerAngles(ratRotation);
eulerAngle.x = 0;
eulerAngle.z = 0;
var constrainedRotation = Quat.fromVec3Degrees(eulerAngle);
Entities.editEntity(rat, {
velocity: averageVector,
rotation: constrainedRotation,
});
//have to make a 'meta' rat object to keep track of rats for updating sound injector locations. parenting sounds would make this easy.
var metaRat = getMetaRatByRat(rat);
if (metaRat !== undefined) {
if (metaRat.injector !== undefined) {
if (metaRat.injector.isPlaying === true) {
metaRat.injector.options = {
loop: true,
position: ratPosition
};
}
}
}
})
}
Script.update.connect(moveRats);
Script.update.connect(updateTweens);
function checkDistanceFromNest(rat) {
var ratProps = Entities.getEntityProperties(rat, "position");
var distance = Vec3.distance(ratProps.position, RAT_NEST_LOCATION);
if (distance < RAT_IN_NEST_DISTANCE) {
//at nest
removeRatFromScene(rat);
}
}
function removeRatFromScene(rat) {
var index = rats.indexOf(rat);
if (index > -1) {
rats.splice(index, 1);
Entities.deleteEntity(rat);
}
var metaRatIndex = findWithAttr(metaRats, 'rat', rat);
if (metaRatIndex > -1) {
metaRats[metaRatIndex].injector.stop();
metaRats.splice(metaRatIndex, 1);
}
}
function popRatFromStack(entityID) {
var index = rats.indexOf(entityID);
if (index > -1) {
rats.splice(index, 1);
}
var metaRatIndex = findWithAttr(metaRats, 'rat', entityID);
if (metaRatIndex > -1) {
metaRats[metaRatIndex].injector.stop();
metaRats.splice(metaRatIndex, 1);
}
}
function findWithAttr(array, attr, value) {
for (var i = 0; i < array.length; i += 1) {
if (array[i][attr] === value) {
return i;
}
}
return -1;
}
function getMetaRatByRat(rat) {
var result = metaRats.filter(function(metaRat) {
return rat === metaRat.rat;
});
return result[0];
}
Entities.deletingEntity.connect(popRatFromStack);
function cleanupLeftoverAvoidersBeforeStart() {
// Sometimes if we crash or something there could be extra avoider blocks around. Clear them out.
var nearbyEntities = Entities.findEntities(RAT_SPAWNER_LOCATION, 100);
var entityIndex;
for (entityIndex = 0; entityIndex < nearbyEntities.length; entityIndex++) {
var entityID = nearbyEntities[entityIndex];
var entityProps = Entities.getEntityProperties(entityID);
if (entityProps.name === 'Hifi-Rat-Avoider') {
Entities.deleteEntity(entityID);
}
}
}
function cleanup() {
while (rats.length > 0) {
Entities.deleteEntity(rats.pop());
}
while (avoiders.length > 0) {
Entities.deleteEntity(avoiders.pop());
}
Entities.deleteEntity(target);
Script.update.disconnect(moveRats);
Script.update.disconnect(updateTweens);
Entities.deletingEntity.disconnect(popRatFromStack);
Script.clearInterval(ratSpawnerInterval);
}
Script.scriptEnding.connect(cleanup);
var ratSpawnerInterval;
if (USE_CONSTANT_SPAWNER === true) {
ratSpawnerInterval = Script.setInterval(function() {
var rat = addRat();
playRatRunningAnimation(rat);
rats.push(rat);
ratCount++;
if (ratCount % RAT_SOUND_RATE === 0) {
var metaRat = {
rat: rat,
injector: createRatSoundInjector()
}
metaRats.push(metaRat);
Script.setTimeout(function() {
//if we have too many injectors hanging around there are problems
metaRat.injector.stop();
delete metaRat.injector;
}, RAT_SPAWN_RATE * RAT_SOUND_CLEAR_RATE);
}
}, RAT_SPAWN_RATE);
}

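Editor's note: moveRats() above blends up to three influences per rat: an arrive vector toward the nest plus, when present, the mean flee vectors away from avatars and away from avoider blocks, then divides by the number of contributors. A plain-JavaScript sketch of that accumulation (no Interface API, plain objects stand in for Vec3, and the two flee groups are collapsed into one list for brevity):

// Sketch of the blending done in moveRats(): average whatever steering contributions exist this frame.
function sum(a, b) { return { x: a.x + b.x, y: a.y + b.y, z: a.z + b.z }; }
function scaleVec(v, s) { return { x: v.x * s, y: v.y * s, z: v.z * s }; }
function blendSteering(seek, fleeVectors) {
    var average = seek;      // always try to head for the nest
    var divisorCount = 1;
    if (fleeVectors.length > 0) {
        // accumulate the flee vectors, then fold their mean into the blend
        var fleeSum = fleeVectors.reduce(sum);
        average = sum(average, scaleVec(fleeSum, 1 / fleeVectors.length));
        divisorCount++;
    }
    return scaleVec(average, 1 / divisorCount);
}
// Example: seek toward +z, one flee vector pushing along +x
var v = blendSteering({ x: 0, y: 0, z: 1 }, [{ x: 2, y: 0, z: 0 }]);
// v is { x: 1, y: 0, z: 0.5 }
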
View file

@ -0,0 +1,183 @@
//
// ratSteer.js
//
// Created by James B. Pollack @imgntn on 12/7/2015
// Copyright 2015 High Fidelity, Inc.
//
// This is an example of steering behaviors that can be applied to entities.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
function flee(thisEntity, target) {
var targetPosition = Entities.getEntityProperties(target, "position").position;
var properties = Entities.getEntityProperties(thisEntity, ["position", "velocity"]);
var location = properties.position;
var velocity = properties.velocity;
var MAX_SPEED = 1;
var MAX_FORCE = 1;
var FLEE_RANGE = 2;
var desired = Vec3.subtract(location, targetPosition);
var d = Vec3.length(desired);
desired = Vec3.normalize(desired);
desired = Vec3.multiply(MAX_SPEED, desired);
if (d < FLEE_RANGE) {
var steer = Vec3.subtract(desired, velocity);
var steerVector = new V3(desired.x, 0, desired.z);
steer = steerVector.limit(MAX_FORCE);
return steer;
} else {
//target too far away to flee
return
}
}
function fleeAllAvatars(thisEntity) {
//print('FLEE AVATARS');
var properties = Entities.getEntityProperties(thisEntity, ["position", "velocity"]);
var location = properties.position;
var velocity = properties.velocity;
var nearbyEntities = Entities.findEntities(location, 3);
var flightVectors = [];
for (var entityIndex = 0; entityIndex < nearbyEntities.length; entityIndex++) {
var entityID = nearbyEntities[entityIndex];
var entityProps = Entities.getEntityProperties(entityID);
if (entityProps.name === 'Hifi-Avatar-Detector') {
//found an avatar to flee
var MAX_SPEED = 8;
var MAX_FORCE = 8;
var FLEE_AVATAR_RANGE = 3;
var desired = Vec3.subtract(location, entityProps.position);
var d = Vec3.length(desired);
desired = Vec3.normalize(desired);
desired = Vec3.multiply(MAX_SPEED, desired);
if (d < FLEE_AVATAR_RANGE) {
var steer = Vec3.subtract(desired, velocity);
var steerVector = new V3(desired.x, 0, desired.z);
steer = steerVector.limit(MAX_FORCE);
flightVectors.push(steer);
} else {
// target too far away from this avatar to flee
}
}
}
return flightVectors;
}
function fleeAvoiderBlocks(thisEntity) {
// print('FLEE AVOIDER BLOCKS');
var properties = Entities.getEntityProperties(thisEntity, ["position", "velocity"]);
var location = properties.position;
var velocity = properties.velocity;
var nearbyEntities = Entities.findEntities(location, 2);
var flightVectors = [];
for (var entityIndex = 0; entityIndex < nearbyEntities.length; entityIndex++) {
var entityID = nearbyEntities[entityIndex];
var entityProps = Entities.getEntityProperties(entityID);
if (entityProps.name === 'Hifi-Rat-Avoider') {
//found an avoiderblock to flee
var MAX_SPEED = 11;
var MAX_FORCE = 6;
var FLEE_AVOIDER_RANGE = 5;
var desired = Vec3.subtract(location, entityProps.position);
var d = Vec3.length(desired);
desired = Vec3.normalize(desired);
desired = Vec3.multiply(MAX_SPEED, desired);
if (d < FLEE_AVOIDER_RANGE) {
var steer = Vec3.subtract(desired, velocity);
var steerVector = new V3(desired.x, 0, desired.z);
steer = steerVector.limit(MAX_FORCE);
flightVectors.push(steer);
} else {
//target too far away from this avoider to flee
}
}
}
return flightVectors;
}
function arrive(thisEntity, target) {
var targetPosition = Entities.getEntityProperties(target, "position").position;
var properties = Entities.getEntityProperties(thisEntity, ["position", "velocity"]);
var location = properties.position;
var velocity = properties.velocity;
var MAX_SPEED = 10;
var MAX_FORCE = 6;
var ARRIVAL_DISTANCE = 2;
var desired = Vec3.subtract(targetPosition, location);
var d = Vec3.length(desired);
desired = Vec3.normalize(desired);
if (d < ARRIVAL_DISTANCE) {
var m = scale(d, 0, ARRIVAL_DISTANCE, 0, MAX_SPEED);
desired = Vec3.multiply(m, desired);
} else {
desired = Vec3.multiply(MAX_SPEED, desired);
}
var steer = Vec3.subtract(desired, velocity);
var steerVector = new V3(desired.x, 0, desired.z);
steer = steerVector.limit(MAX_FORCE);
return steer;
}
function V3(x, y, z) {
this.x = x;
this.y = y;
this.z = z;
return
}
V3.prototype.length = function() {
return Math.sqrt(this.x * this.x + this.y * this.y + this.z * this.z);
};
V3.prototype.limit = function(s) {
var len = this.length();
if (len > s && len > 0) {
this.scale(s / len);
}
return this;
};
V3.prototype.scale = function(f) {
this.x *= f;
this.y *= f;
this.z *= f;
return this;
};
var v3 = new V3();
var scale = function(value, min1, max1, min2, max2) {
return min2 + (max2 - min2) * ((value - min1) / (max1 - min1));
}
loadSteer = function() {
return {
flee: flee,
fleeAllAvatars: fleeAllAvatars,
fleeAvoiderBlocks: fleeAvoiderBlocks,
arrive: arrive
};
}

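Editor's note: arrive() above uses the scale() helper to remap the remaining distance d from [0, ARRIVAL_DISTANCE] onto [0, MAX_SPEED], so the desired speed ramps down linearly as a rat closes on its target. A couple of plain-JavaScript checks of that remap (same formula as scale() above; the constants match arrive() and are otherwise illustrative):

// Same linear remap as scale() in ratSteer.js: map value from [min1, max1] to [min2, max2].
function remap(value, min1, max1, min2, max2) {
    return min2 + (max2 - min2) * ((value - min1) / (max1 - min1));
}
var ARRIVAL_DISTANCE = 2;
var MAX_SPEED = 10;
console.log(remap(2.0, 0, ARRIVAL_DISTANCE, 0, MAX_SPEED)); // 10 -> full speed at the edge of the arrival zone
console.log(remap(1.0, 0, ARRIVAL_DISTANCE, 0, MAX_SPEED)); // 5  -> half speed halfway in
console.log(remap(0.0, 0, ARRIVAL_DISTANCE, 0, MAX_SPEED)); // 0  -> stop on top of the target
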
View file

@ -2764,6 +2764,8 @@ void Application::reloadResourceCaches() {
DependencyManager::get<TextureCache>()->refreshAll();
DependencyManager::get<NodeList>()->reset(); // Force redownload of .fst models
getMyAvatar()->resetFullAvatarURL();
}
void Application::rotationModeChanged() {
@ -3077,6 +3079,7 @@ void Application::queryOctree(NodeType_t serverType, PacketType packetType, Node
_octreeQuery.setCameraNearClip(_viewFrustum.getNearClip());
_octreeQuery.setCameraFarClip(_viewFrustum.getFarClip());
_octreeQuery.setCameraEyeOffsetPosition(glm::vec3());
_octreeQuery.setKeyholeRadius(_viewFrustum.getKeyholeRadius());
auto lodManager = DependencyManager::get<LODManager>();
_octreeQuery.setOctreeSizeScale(lodManager->getOctreeSizeScale());
_octreeQuery.setBoundaryLevelAdjust(lodManager->getBoundaryLevelAdjust());
@ -3429,10 +3432,10 @@ namespace render {
// Background rendering decision
auto skyStage = DependencyManager::get<SceneScriptingInterface>()->getSkyStage();
auto skybox = model::SkyboxPointer();
if (skyStage->getBackgroundMode() == model::SunSkyStage::NO_BACKGROUND) {
// this line intentionally left blank
} else if (skyStage->getBackgroundMode() == model::SunSkyStage::SKY_DOME) {
if (/*!selfAvatarOnly &&*/ Menu::getInstance()->isOptionChecked(MenuOption::Stars)) {
if (Menu::getInstance()->isOptionChecked(MenuOption::Stars)) {
PerformanceTimer perfTimer("stars");
PerformanceWarning warn(Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings),
"Application::payloadRender<BackgroundRenderData>() ... stars...");
@ -3498,10 +3501,9 @@ namespace render {
}
} else if (skyStage->getBackgroundMode() == model::SunSkyStage::SKY_BOX) {
PerformanceTimer perfTimer("skybox");
skybox = skyStage->getSkybox();
auto skybox = skyStage->getSkybox();
if (skybox) {
skybox->render(batch, *(qApp->getDisplayViewFrustum()));
skybox->render(batch, *(args->_viewFrustum));
}
}
}
@ -3768,6 +3770,10 @@ void Application::clearDomainOctreeDetails() {
// reset the model renderer
getEntities()->clear();
auto skyStage = DependencyManager::get<SceneScriptingInterface>()->getSkyStage();
skyStage->setBackgroundMode(model::SunSkyStage::SKY_DOME);
}
void Application::domainChanged(const QString& domainHostname) {

View file

@ -986,6 +986,14 @@ void MyAvatar::setSkeletonModelURL(const QUrl& skeletonModelURL) {
_headBoneSet.clear();
}
void MyAvatar::resetFullAvatarURL() {
auto lastAvatarURL = getFullAvatarURLFromPreferences();
auto lastAvatarName = getFullAvatarModelName();
useFullAvatarURL(QUrl());
useFullAvatarURL(lastAvatarURL, lastAvatarName);
}
void MyAvatar::useFullAvatarURL(const QUrl& fullAvatarURL, const QString& modelName) {
if (QThread::currentThread() != thread()) {

View file

@ -196,6 +196,8 @@ public:
Q_INVOKABLE void useFullAvatarURL(const QUrl& fullAvatarURL, const QString& modelName = QString());
Q_INVOKABLE const QUrl& getFullAvatarURLFromPreferences() const { return _fullAvatarURLFromPreferences; }
Q_INVOKABLE const QString& getFullAvatarModelName() const { return _fullAvatarModelName; }
void resetFullAvatarURL();
virtual void setAttachmentData(const QVector<AttachmentData>& attachmentData) override;

View file

@ -50,9 +50,11 @@ QScriptValue AnimVariantMap::animVariantMapToScriptValue(QScriptEngine* engine,
if (useNames) { // copy only the requested names
for (const QString& name : names) {
auto search = _map.find(name);
if (search != _map.end()) { // scripts are allowed to request names that do not exist
if (search != _map.end()) {
setOne(name, search->second);
}
} else if (_triggers.count(name) == 1) {
target.setProperty(name, true);
} // scripts are allowed to request names that do not exist
}
} else { // copy all of them

View file

@ -717,7 +717,8 @@ void Rig::updateAnimationStateHandlers() { // called on avatar update thread (wh
// This works (I tried it), but the result would be that we would still have the same runtime type checks as the invokeMethod above
// (occurring within the ScriptEngine::callAnimationStateHandler invokeMethod trampoline), _plus_ another runtime check for the dynamic_cast.
// gather results in (likely from an earlier update):
// Gather results in (likely from an earlier update).
// Note: the behavior is undefined if a handler (re-)sets a trigger. Scripts should not be doing that.
_animVars.copyVariantsFrom(value.results); // If multiple handlers write the same anim var, the last registered wins. (_map preserves order).
}
}

View file

@ -226,9 +226,16 @@ bool EntityTree::updateEntityWithElement(EntityItemPointer entity, const EntityI
while (!toProcess.empty()) {
EntityItemPointer childEntity = std::static_pointer_cast<EntityItem>(toProcess.dequeue());
if (!childEntity) {
continue;
}
BoundingBoxRelatedProperties newChildBBRelProperties(childEntity);
EntityTreeElementPointer containingElement = childEntity->getElement();
if (!containingElement) {
continue;
}
UpdateEntityOperator theChildOperator(getThisPointer(),
childEntity->getElement(),
containingElement,
childEntity, newChildBBRelProperties);
recurseTreeWithOperator(&theChildOperator);
foreach (SpatiallyNestablePointer childChild, childEntity->getChildren()) {

View file

@ -49,35 +49,32 @@ template <
>
class GLEscrow {
public:
static const uint64_t MAX_UNSIGNALED_TIME = USECS_PER_SECOND / 2;
struct Item {
T _value;
const T _value;
GLsync _sync;
uint64_t _created;
const uint64_t _created;
Item(T value, GLsync sync) :
_value(value), _sync(sync), _created(usecTimestampNow())
{
}
uint64_t age() {
uint64_t age() const {
return usecTimestampNow() - _created;
}
bool signaled() {
bool signaled() const {
auto result = glClientWaitSync(_sync, 0, 0);
if (GL_TIMEOUT_EXPIRED != result && GL_WAIT_FAILED != result) {
return true;
}
if (age() > (USECS_PER_SECOND / 2)) {
qWarning() << "Long unsignaled sync";
}
return false;
}
};
using Mutex = std::recursive_mutex;
using Lock = std::unique_lock<Mutex>;
using Mutex = std::mutex;
using Recycler = std::function<void(T t)>;
// deque gives us random access, double ended push & pop and size, all in constant time
using Deque = std::deque<Item>;
@ -87,9 +84,32 @@ public:
_recycler = recycler;
}
size_t depth() {
template <typename F>
void withLock(F f) {
using Lock = std::unique_lock<Mutex>;
Lock lock(_mutex);
return _submits.size();
f();
}
template <typename F>
bool tryLock(F f) {
using Lock = std::unique_lock<Mutex>;
bool result = false;
Lock lock(_mutex, std::try_to_lock_t());
if (lock.owns_lock()) {
f();
result = true;
}
return result;
}
size_t depth() {
size_t result{ 0 };
withLock([&]{
result = _submits.size();
});
return result;
}
// Submit a new resource from the producer context
@ -104,11 +124,9 @@ public:
glFlush();
}
{
Lock lock(_mutex);
withLock([&]{
_submits.push_back(Item(t, writeSync));
}
});
return cleanTrash();
}
@ -120,13 +138,13 @@ public:
// On the one hand using try_lock() reduces the chance of blocking the consumer thread,
// but if the producer thread is going fast enough, it could effectively
// starve the consumer out of ever actually getting resources.
if (_mutex.try_lock()) {
tryLock([&]{
// May be called on any thread, but must be inside a locked section
if (signaled(_submits, 0)) {
result = _submits.at(0)._value;
_submits.pop_front();
}
_mutex.unlock();
}
});
return result;
}
@ -154,37 +172,45 @@ public:
glFlush();
}
Lock lock(_mutex);
_releases.push_back(Item(t, readSync));
withLock([&]{
_releases.push_back(Item(t, readSync));
});
}
private:
size_t cleanTrash() {
size_t wastedWork{ 0 };
List trash;
{
tryLock([&]{
while (!_submits.empty()) {
const auto& item = _submits.front();
if (!item._sync || item.age() < MAX_UNSIGNALED_TIME) {
break;
}
qWarning() << "Long unsignaled sync " << item._sync << " unsignaled for " << item.age();
_trash.push_front(item);
_submits.pop_front();
}
// We only ever need one ready item available in the list, so if the
// second item is signaled (implying the first is as well), remove the first
// item. Iterate until the SECOND item in the list is not in the ready state
// The signaled function takes care of checking against the deque size
while (signaled(_submits, 1)) {
pop(_submits);
_trash.push_front(_submits.front());
_submits.pop_front();
++wastedWork;
}
// Stuff in the release queue can be cleared out as soon as it's signaled
while (signaled(_releases, 0)) {
pop(_releases);
_trash.push_front(_releases.front());
_releases.pop_front();
}
{
// FIXME I don't think this lock should be necessary, only the submitting thread
// touches the trash
Lock lock(_mutex);
trash.swap(_trash);
}
}
trash.swap(_trash);
});
// FIXME maybe do a timing on the deleters and warn if it's taking excessive time?
// although we are out of the lock, so it shouldn't be blocking anything
std::for_each(trash.begin(), trash.end(), [&](typename List::const_reference item) {
@ -198,14 +224,6 @@ private:
return wastedWork;
}
// May be called on any thread, but must be inside a locked section
void pop(Deque& deque) {
Lock lock(_mutex);
auto& item = deque.front();
_trash.push_front(item);
deque.pop_front();
}
// May be called on any thread, but must be inside a locked section
bool signaled(Deque& deque, size_t i) {
if (i >= deque.size()) {

View file

@ -8,16 +8,18 @@
#include "OffscreenQmlSurface.h"
#include "OglplusHelpers.h"
#include <QWidget>
#include <QtQml>
#include <QQmlEngine>
#include <QQmlComponent>
#include <QQuickItem>
#include <QQuickWindow>
#include <QQuickRenderControl>
#include <QWaitCondition>
#include <QMutex>
#include <QtWidgets/QWidget>
#include <QtQml/QtQml>
#include <QtQml/QQmlEngine>
#include <QtQml/QQmlComponent>
#include <QtQuick/QQuickItem>
#include <QtQuick/QQuickWindow>
#include <QtQuick/QQuickRenderControl>
#include <QtCore/QWaitCondition>
#include <QtCore/QMutex>
#include <QtGui/QOpenGLContext>
#include <shared/NsightHelpers.h>
#include <PerfStat.h>
#include <DependencyManager.h>
#include <NumericalConstants.h>
@ -25,8 +27,6 @@
#include "GLEscrow.h"
#include "OffscreenGLCanvas.h"
// FIXME move to threaded rendering with Qt 5.5
//#define QML_THREADED
// Time between receiving a request to render the offscreen UI actually triggering
// the render. Could possibly be increased depending on the framerate we expect to
@ -56,13 +56,11 @@ private:
Q_DECLARE_LOGGING_CATEGORY(offscreenFocus)
Q_LOGGING_CATEGORY(offscreenFocus, "hifi.offscreen.focus")
#ifdef QML_THREADED
static const QEvent::Type INIT = QEvent::Type(QEvent::User + 1);
static const QEvent::Type RENDER = QEvent::Type(QEvent::User + 2);
static const QEvent::Type RESIZE = QEvent::Type(QEvent::User + 3);
static const QEvent::Type STOP = QEvent::Type(QEvent::User + 4);
static const QEvent::Type UPDATE = QEvent::Type(QEvent::User + 5);
#endif
class OffscreenQmlRenderer : public OffscreenGLCanvas {
friend class OffscreenQmlSurface;
@ -70,22 +68,30 @@ public:
OffscreenQmlRenderer(OffscreenQmlSurface* surface, QOpenGLContext* shareContext) : _surface(surface) {
OffscreenGLCanvas::create(shareContext);
#ifdef QML_THREADED
_renderControl = new QMyQuickRenderControl();
// Create a QQuickWindow that is associated with our render control. Note that this
// window never gets created or shown, meaning that it will never get an underlying
// native (platform) window.
QQuickWindow::setDefaultAlphaBuffer(true);
// Weirdness... QQuickWindow NEEDS to be created on the rendering thread, or it will refuse to render
// because it retains an internal 'context' object that retains the thread it was created on,
// regardless of whether you later move it to another thread.
_quickWindow = new QQuickWindow(_renderControl);
_quickWindow->setColor(QColor(255, 255, 255, 0));
_quickWindow->setFlags(_quickWindow->flags() | static_cast<Qt::WindowFlags>(Qt::WA_TranslucentBackground));
// Qt 5.5
_renderControl->prepareThread(_renderThread);
_renderControl->prepareThread(&_thread);
_context->moveToThread(&_thread);
moveToThread(&_thread);
_thread.setObjectName("QML Thread");
_thread.start();
post(INIT);
#else
init();
#endif
}
#ifdef QML_THREADED
bool event(QEvent *e)
{
bool event(QEvent *e) {
switch (int(e->type())) {
case INIT:
{
@ -120,7 +126,6 @@ public:
QCoreApplication::postEvent(this, new QEvent(type));
}
#endif
private:
@ -143,27 +148,9 @@ private:
void init() {
_renderControl = new QMyQuickRenderControl();
connect(_renderControl, &QQuickRenderControl::renderRequested, _surface, &OffscreenQmlSurface::requestRender);
connect(_renderControl, &QQuickRenderControl::sceneChanged, _surface, &OffscreenQmlSurface::requestUpdate);
// Create a QQuickWindow that is associated with our render control. Note that this
// window never gets created or shown, meaning that it will never get an underlying
// native (platform) window.
QQuickWindow::setDefaultAlphaBuffer(true);
// Weirdness... QQuickWindow NEEDS to be created on the rendering thread, or it will refuse to render
// because it retains an internal 'context' object that retains the thread it was created on,
// regardless of whether you later move it to another thread.
_quickWindow = new QQuickWindow(_renderControl);
_quickWindow->setColor(QColor(255, 255, 255, 0));
_quickWindow->setFlags(_quickWindow->flags() | static_cast<Qt::WindowFlags>(Qt::WA_TranslucentBackground));
#ifdef QML_THREADED
// However, because we want to use synchronous events with the quickwindow, we need to move it back to the main
// thread after it's created.
_quickWindow->moveToThread(qApp->thread());
#endif
if (!makeCurrent()) {
qWarning("Failed to make context current on render thread");
return;
@ -189,17 +176,15 @@ private:
doneCurrent();
#ifdef QML_THREADED
_context->moveToThread(QCoreApplication::instance()->thread());
_cond.wakeOne();
#endif
}
void resize(const QSize& newSize) {
void resize() {
// Update our members
if (_quickWindow) {
_quickWindow->setGeometry(QRect(QPoint(), newSize));
_quickWindow->contentItem()->setSize(newSize);
_quickWindow->setGeometry(QRect(QPoint(), _newSize));
_quickWindow->contentItem()->setSize(_newSize);
}
// Qt bug in 5.4 forces this check of pixel ratio,
@ -209,7 +194,7 @@ private:
pixelRatio = _renderControl->_renderWindow->devicePixelRatio();
}
uvec2 newOffscreenSize = toGlm(newSize * pixelRatio);
uvec2 newOffscreenSize = toGlm(_newSize * pixelRatio);
_textures.setSize(newOffscreenSize);
if (newOffscreenSize == _size) {
return;
@ -222,7 +207,7 @@ private:
return;
}
qDebug() << "Offscreen UI resizing to " << newSize.width() << "x" << newSize.height() << " with pixel ratio " << pixelRatio;
qDebug() << "Offscreen UI resizing to " << _newSize.width() << "x" << _newSize.height() << " with pixel ratio " << pixelRatio;
setupFbo();
doneCurrent();
}
@ -237,54 +222,44 @@ private:
return;
}
//Q_ASSERT(toGlm(_quickWindow->geometry().size()) == _size);
//Q_ASSERT(toGlm(_quickWindow->geometry().size()) == _textures._size);
_renderControl->sync();
#ifdef QML_THREADED
_cond.wakeOne();
lock->unlock();
#endif
using namespace oglplus;
_quickWindow->setRenderTarget(GetName(*_fbo), QSize(_size.x, _size.y));
TexturePtr texture = _textures.getNextTexture();
_fbo->Bind(Framebuffer::Target::Draw);
_fbo->AttachTexture(Framebuffer::Target::Draw, FramebufferAttachment::Color, *texture, 0);
_fbo->Complete(Framebuffer::Target::Draw);
//Context::Clear().ColorBuffer();
{
_renderControl->render();
// FIXME The web browsers seem to be leaving GL in an error state.
// Need a debug context with sync logging to figure out why.
// for now just clear the errors
glGetError();
PROFILE_RANGE("qml_render")
TexturePtr texture = _textures.getNextTexture();
_fbo->Bind(Framebuffer::Target::Draw);
_fbo->AttachTexture(Framebuffer::Target::Draw, FramebufferAttachment::Color, *texture, 0);
_fbo->Complete(Framebuffer::Target::Draw);
{
_renderControl->render();
// FIXME The web browsers seem to be leaving GL in an error state.
// Need a debug context with sync logging to figure out why.
// for now just clear the errors
glGetError();
}
// FIXME probably unnecessary
DefaultFramebuffer().Bind(Framebuffer::Target::Draw);
_quickWindow->resetOpenGLState();
_escrow.submit(GetName(*texture));
}
// FIXME probably unnecessary
DefaultFramebuffer().Bind(Framebuffer::Target::Draw);
_quickWindow->resetOpenGLState();
_escrow.submit(GetName(*texture));
_lastRenderTime = usecTimestampNow();
}
void aboutToQuit() {
#ifdef QML_THREADED
QMutexLocker lock(&_quitMutex);
_quit = true;
#endif
}
void stop() {
#ifdef QML_THREADED
QMutexLocker lock(&_quitMutex);
QMutexLocker lock(&_mutex);
post(STOP);
_cond.wait(&_mutex);
#else
cleanup();
#endif
}
bool allowNewFrame(uint8_t fps) {
@ -297,13 +272,12 @@ private:
QQuickWindow* _quickWindow{ nullptr };
QMyQuickRenderControl* _renderControl{ nullptr };
#ifdef QML_THREADED
QThread _thread;
QMutex _mutex;
QWaitCondition _cond;
QMutex _quitMutex;
#endif
QSize _newSize;
bool _quit;
FramebufferPtr _fbo;
RenderbufferPtr _depthStencil;
@ -346,9 +320,7 @@ void OffscreenQmlSurface::create(QOpenGLContext* shareContext) {
}
void OffscreenQmlSurface::resize(const QSize& newSize) {
#ifdef QML_THREADED
QMutexLocker _locker(&(_renderer->_mutex));
#endif
if (!_renderer || !_renderer->_quickWindow) {
QSize currentSize = _renderer->_quickWindow->geometry().size();
if (newSize == currentSize) {
@ -362,11 +334,12 @@ void OffscreenQmlSurface::resize(const QSize& newSize) {
_rootItem->setSize(newSize);
}
#ifdef QML_THREADED
{
QMutexLocker _locker(&(_renderer->_mutex));
_renderer->_newSize = newSize;
}
_renderer->post(RESIZE);
#else
_renderer->resize(newSize);
#endif
}
QQuickItem* OffscreenQmlSurface::getRootItem() {
@ -466,11 +439,7 @@ void OffscreenQmlSurface::updateQuick() {
}
if (_render) {
#ifdef QML_THREADED
_renderer->post(RENDER);
#else
_renderer->render(nullptr);
#endif
_render = false;
}

View file

@ -16,13 +16,6 @@
#if defined(NSIGHT_FOUND)
#include "nvToolsExt.h"
ProfileRange::ProfileRange(const char *name) {
nvtxRangePush(name);
}
ProfileRange::~ProfileRange() {
nvtxRangePop();
}
ProfileRangeBatch::ProfileRangeBatch(gpu::Batch& batch, const char *name) : _batch(batch) {
_batch.pushProfileRange(name);
}

View file

@ -15,6 +15,8 @@
#include <mutex>
#include <functional>
#include <shared/NsightHelpers.h>
#include "Framebuffer.h"
#include "Pipeline.h"
#include "Query.h"
@ -22,18 +24,6 @@
#include "Texture.h"
#include "Transform.h"
#if defined(NSIGHT_FOUND)
class ProfileRange {
public:
ProfileRange(const char *name);
~ProfileRange();
};
#define PROFILE_RANGE(name) ProfileRange profileRangeThis(name);
#else
#define PROFILE_RANGE(name)
#endif
class QDebug;
namespace gpu {

View file

@ -106,6 +106,7 @@ public:
bool setAttribute(Slot slot, Frequency frequency = PER_VERTEX);
bool setAttribute(Slot slot, Slot channel, Frequency frequency = PER_VERTEX);
bool hasAttribute(Slot slot) const { return (_attributes.find(slot) != _attributes.end()); }
protected:
AttributeMap _attributes;

View file

@ -135,7 +135,7 @@ TransformCamera getTransformCamera() {
<@func transformClipToEyeDir(cameraTransform, clipPos, eyeDir)@>
{ // transformClipToEyeDir
<$eyeDir$> = vec3(<$cameraTransform$>._projectionInverse * vec4(<$clipPos$>.xyz, 0.0));
<$eyeDir$> = vec3(<$cameraTransform$>._projectionInverse * vec4(<$clipPos$>.xyz, 1.0)); // Must be 1.0 here
}
<@endfunc@>

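Editor's note: the Transform.slh change above swaps the homogeneous w from 0.0 to 1.0 when unprojecting the clip-space direction, with only "Must be 1.0 here" as explanation. A short justification, assuming a standard perspective projection matrix P (an assumption, not stated in the diff):

P^{-1} = \begin{pmatrix} a & 0 & 0 & 0 \\ 0 & b & 0 & 0 \\ 0 & 0 & 0 & -1 \\ 0 & 0 & c & d \end{pmatrix},
\qquad
P^{-1}\begin{pmatrix} x \\ y \\ 0 \\ w \end{pmatrix} = \begin{pmatrix} a\,x \\ b\,y \\ -w \\ d\,w \end{pmatrix}.

Taking the xyz of the result: with w = 0.0 the eye-space z component collapses to 0, a degenerate direction lying in the camera plane; with w = 1.0 it becomes -1, a ray that actually points into the scene through the quad vertex, which transformEyeToWorldDir then turns into the skybox normal.
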
View file

@ -92,7 +92,7 @@ void Mesh::setPartBuffer(const BufferView& buffer) {
_partBuffer = buffer;
}
const Box Mesh::evalPartBound(int partNum) const {
Box Mesh::evalPartBound(int partNum) const {
Box box;
if (partNum < _partBuffer.getNum<Part>()) {
const Part& part = _partBuffer.get<Part>(partNum);
@ -111,7 +111,7 @@ const Box Mesh::evalPartBound(int partNum) const {
return box;
}
const Box Mesh::evalPartBounds(int partStart, int partEnd, Boxes& bounds) const {
Box Mesh::evalPartBounds(int partStart, int partEnd, Boxes& bounds) const {
Box totalBound;
auto part = _partBuffer.cbegin<Part>() + partStart;
auto partItEnd = _partBuffer.cbegin<Part>() + partEnd;

View file

@ -107,10 +107,10 @@ public:
uint getNumParts() const { return _partBuffer.getNumElements(); }
// evaluate the bounding box of A part
const Box evalPartBound(int partNum) const;
Box evalPartBound(int partNum) const;
// evaluate the bounding boxes of the parts in the range [start, end[ and fill the bounds parameter
// the returned box is the bounding box of ALL the evaluated part bounds.
const Box evalPartBounds(int partStart, int partEnd, Boxes& bounds) const;
Box evalPartBounds(int partStart, int partEnd, Boxes& bounds) const;
static gpu::Primitive topologyToPrimitive(Topology topo) { return static_cast<gpu::Primitive>(topo); }

View file

@ -71,22 +71,12 @@ void Skybox::render(gpu::Batch& batch, const ViewFrustum& frustum) const {
void Skybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const Skybox& skybox) {
// Create the static shared elements used to render the skybox
static gpu::BufferPointer theBuffer;
static gpu::Stream::FormatPointer theFormat;
static gpu::BufferPointer theConstants;
static gpu::PipelinePointer thePipeline;
const int SKYBOX_SKYMAP_SLOT = 0;
const int SKYBOX_CONSTANTS_SLOT = 0;
static std::once_flag once;
std::call_once(once, [&] {
{
const float CLIP = 1.0f;
const glm::vec2 vertices[4] = { { -CLIP, -CLIP }, { CLIP, -CLIP }, { -CLIP, CLIP }, { CLIP, CLIP } };
theBuffer = std::make_shared<gpu::Buffer>(sizeof(vertices), (const gpu::Byte*) vertices);
theFormat = std::make_shared<gpu::Stream::Format>();
theFormat->setAttribute(gpu::Stream::POSITION, gpu::Stream::POSITION, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::XYZ));
}
{
auto skyVS = gpu::Shader::createVertex(std::string(Skybox_vert));
auto skyFS = gpu::Shader::createPixel(std::string(Skybox_frag));
@ -115,8 +105,6 @@ void Skybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const Sky
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewTransform);
batch.setModelTransform(Transform()); // only for Mac
batch.setInputBuffer(gpu::Stream::POSITION, theBuffer, 0, 8);
batch.setInputFormat(theFormat);
gpu::TexturePointer skymap;
if (skybox.getCubemap() && skybox.getCubemap()->isDefined()) {

View file

@ -11,21 +11,26 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
out vec3 _normal;
void main(void) {
void main(void) {
const float depth = 0.0;
const vec4 UNIT_QUAD[4] = vec4[4](
vec4(-1.0, -1.0, depth, 1.0),
vec4(1.0, -1.0, depth, 1.0),
vec4(-1.0, 1.0, depth, 1.0),
vec4(1.0, 1.0, depth, 1.0)
);
vec4 inPosition = UNIT_QUAD[gl_VertexID];
// standard transform
TransformCamera cam = getTransformCamera();
vec3 clipDir = vec3(inPosition.xy, 0.0);
vec3 eyeDir;
<$transformClipToEyeDir(cam, clipDir, eyeDir)$>
<$transformEyeToWorldDir(cam, eyeDir, _normal)$>

View file

@ -41,7 +41,7 @@ PacketVersion versionForPacketType(PacketType packetType) {
case PacketType::EntityAdd:
case PacketType::EntityEdit:
case PacketType::EntityData:
return VERSION_ENTITITES_REMOVED_START_AUTOMATICALLY_FROM_ANIMATION_PROPERTY_GROUP;
return VERSION_ENTITIES_REMOVED_START_AUTOMATICALLY_FROM_ANIMATION_PROPERTY_GROUP;
case PacketType::AvatarData:
case PacketType::BulkAvatarData:
return 17;

View file

@ -161,6 +161,6 @@ const PacketVersion VERSION_ENTITIES_KEYLIGHT_PROPERTIES_GROUP_BIS = 48;
const PacketVersion VERSION_ENTITIES_PARTICLES_ADDITIVE_BLENDING = 49;
const PacketVersion VERSION_ENTITIES_POLYLINE_TEXTURE = 50;
const PacketVersion VERSION_ENTITIES_HAVE_PARENTS = 51;
const PacketVersion VERSION_ENTITITES_REMOVED_START_AUTOMATICALLY_FROM_ANIMATION_PROPERTY_GROUP = 52;
const PacketVersion VERSION_ENTITIES_REMOVED_START_AUTOMATICALLY_FROM_ANIMATION_PROPERTY_GROUP = 52;
#endif // hifi_PacketHeaders_h

View file

@ -56,6 +56,7 @@ void OctreeHeadlessViewer::queryOctree() {
_octreeQuery.setCameraNearClip(_viewFrustum.getNearClip());
_octreeQuery.setCameraFarClip(_viewFrustum.getFarClip());
_octreeQuery.setCameraEyeOffsetPosition(glm::vec3());
_octreeQuery.setKeyholeRadius(_viewFrustum.getKeyholeRadius());
_octreeQuery.setOctreeSizeScale(getVoxelSizeScale());
_octreeQuery.setBoundaryLevelAdjust(getBoundaryLevelAdjust());

View file

@ -63,6 +63,9 @@ int OctreeQuery::getBroadcastData(unsigned char* destinationBuffer) {
// desired boundaryLevelAdjust
memcpy(destinationBuffer, &_boundaryLevelAdjust, sizeof(_boundaryLevelAdjust));
destinationBuffer += sizeof(_boundaryLevelAdjust);
memcpy(destinationBuffer, &_keyholeRadius, sizeof(_keyholeRadius));
destinationBuffer += sizeof(_keyholeRadius);
return destinationBuffer - bufferStart;
}
@ -104,6 +107,12 @@ int OctreeQuery::parseData(ReceivedMessage& message) {
memcpy(&_boundaryLevelAdjust, sourceBuffer, sizeof(_boundaryLevelAdjust));
sourceBuffer += sizeof(_boundaryLevelAdjust);
auto bytesRead = sourceBuffer - startPosition;
auto bytesLeft = message.getSize() - bytesRead;
if (bytesLeft >= sizeof(_keyholeRadius)) {
memcpy(&_keyholeRadius, sourceBuffer, sizeof(_keyholeRadius));
sourceBuffer += sizeof(_keyholeRadius);
}
return sourceBuffer - startPosition;
}

View file

@ -58,6 +58,7 @@ public:
float getCameraNearClip() const { return _cameraNearClip; }
float getCameraFarClip() const { return _cameraFarClip; }
const glm::vec3& getCameraEyeOffsetPosition() const { return _cameraEyeOffsetPosition; }
float getKeyholeRadius() const { return _keyholeRadius; }
glm::vec3 calculateCameraDirection() const;
@ -69,6 +70,7 @@ public:
void setCameraNearClip(float nearClip) { _cameraNearClip = nearClip; }
void setCameraFarClip(float farClip) { _cameraFarClip = farClip; }
void setCameraEyeOffsetPosition(const glm::vec3& eyeOffsetPosition) { _cameraEyeOffsetPosition = eyeOffsetPosition; }
void setKeyholeRadius(float keyholeRadius) { _keyholeRadius = keyholeRadius; }
// related to Octree Sending strategies
int getMaxQueryPacketsPerSecond() const { return _maxQueryPPS; }
@ -88,6 +90,7 @@ protected:
float _cameraAspectRatio = 1.0f;
float _cameraNearClip = 0.0f;
float _cameraFarClip = 0.0f;
float _keyholeRadius { 0.0f };
glm::vec3 _cameraEyeOffsetPosition = glm::vec3(0.0f);
// octree server sending items

View file

@ -51,14 +51,6 @@ void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum,
static gpu::Stream::FormatPointer theFormat;
if (skybox._procedural && skybox._procedural->_enabled && skybox._procedural->ready()) {
if (!theBuffer) {
const float CLIP = 1.0f;
const glm::vec2 vertices[4] = { { -CLIP, -CLIP }, { CLIP, -CLIP }, { -CLIP, CLIP }, { CLIP, CLIP } };
theBuffer = std::make_shared<gpu::Buffer>(sizeof(vertices), (const gpu::Byte*) vertices);
theFormat = std::make_shared<gpu::Stream::Format>();
theFormat->setAttribute(gpu::Stream::POSITION, gpu::Stream::POSITION, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::XYZ));
}
glm::mat4 projMat;
viewFrustum.evalProjectionMatrix(projMat);
@ -67,8 +59,6 @@ void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum,
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewTransform);
batch.setModelTransform(Transform()); // only for Mac
batch.setInputBuffer(gpu::Stream::POSITION, theBuffer, 0, 8);
batch.setInputFormat(theFormat);
if (skybox.getCubemap() && skybox.getCubemap()->isDefined()) {
batch.setResourceTexture(0, skybox.getCubemap());

View file

@ -25,7 +25,7 @@ namespace render {
// Return opaque for lack of a better idea
return ItemKey::Builder::opaqueShape();
}
template <> const Item::Bound payloadGetBound(const MeshPartPayload::Pointer& payload) {
if (payload) {
return payload->getBound();
@ -39,55 +39,40 @@ namespace render {
using namespace render;
MeshPartPayload::MeshPartPayload(Model* model, int meshIndex, int partIndex, int shapeIndex,
glm::vec3 position, glm::quat orientation) :
model(model),
meshIndex(meshIndex),
partIndex(partIndex),
_shapeID(shapeIndex),
_modelPosition(position),
_modelOrientation(orientation) {
initCache();
MeshPartPayload::MeshPartPayload(model::MeshPointer mesh, int partIndex, model::MaterialPointer material, const Transform& transform, const Transform& offsetTransform) {
updateMeshPart(mesh, partIndex);
updateMaterial(material);
updateTransform(transform, offsetTransform);
}
void MeshPartPayload::initCache() {
const std::vector<std::unique_ptr<NetworkMesh>>& networkMeshes = model->_geometry->getMeshes();
const NetworkMesh& networkMesh = *(networkMeshes.at(meshIndex).get());
_drawMesh = networkMesh._mesh;
const FBXGeometry& geometry = model->_geometry->getFBXGeometry();
const FBXMesh& mesh = geometry.meshes.at(meshIndex);
_hasColorAttrib = !mesh.colors.isEmpty();
_isBlendShaped = !mesh.blendshapes.isEmpty();
_isSkinned = !mesh.clusterIndices.isEmpty();
_drawPart = _drawMesh->getPartBuffer().get<model::Mesh::Part>(partIndex);
auto networkMaterial = model->_geometry->getShapeMaterial(_shapeID);
if (networkMaterial) {
_drawMaterial = networkMaterial->_material;
};
void MeshPartPayload::updateMeshPart(model::MeshPointer drawMesh, int partIndex) {
_drawMesh = drawMesh;
if (_drawMesh) {
auto vertexFormat = _drawMesh->getVertexFormat();
_hasColorAttrib = vertexFormat->hasAttribute(gpu::Stream::COLOR);
_drawPart = _drawMesh->getPartBuffer().get<model::Mesh::Part>(partIndex);
_localBound = _drawMesh->evalPartBound(partIndex);
}
}
void MeshPartPayload::updateModelLocation(glm::vec3 position, glm::quat orientation) {
_modelPosition = position;
_modelOrientation = orientation;
void MeshPartPayload::updateTransform(const Transform& transform, const Transform& offsetTransform) {
_transform = transform;
_offsetTransform = offsetTransform;
Transform::mult(_drawTransform, _transform, _offsetTransform);
_worldBound = _localBound;
_worldBound.transform(_drawTransform);
}
void MeshPartPayload::updateMaterial(model::MaterialPointer drawMaterial) {
_drawMaterial = drawMaterial;
}
render::ItemKey MeshPartPayload::getKey() const {
ItemKey::Builder builder;
builder.withTypeShape();
if (!model->isVisible()) {
builder.withInvisible();
}
if (_isBlendShaped || _isSkinned) {
builder.withDeformed();
}
if (_drawMaterial) {
auto matKey = _drawMaterial->getKey();
if (matKey.isTransparent() || matKey.isTransparentMap()) {
@ -99,9 +84,7 @@ render::ItemKey MeshPartPayload::getKey() const {
}
render::Item::Bound MeshPartPayload::getBound() const {
// NOTE: we can't cache this bounds because we need to handle the case of a moving
// entity or mesh part.
return model->getPartBounds(meshIndex, partIndex, _modelPosition, _modelOrientation);
return _worldBound;
}
void MeshPartPayload::drawCall(gpu::Batch& batch) const {
@ -109,22 +92,12 @@ void MeshPartPayload::drawCall(gpu::Batch& batch) const {
}
void MeshPartPayload::bindMesh(gpu::Batch& batch) const {
if (!_isBlendShaped) {
batch.setIndexBuffer(gpu::UINT32, (_drawMesh->getIndexBuffer()._buffer), 0);
batch.setInputFormat((_drawMesh->getVertexFormat()));
batch.setInputStream(0, _drawMesh->getVertexStream());
} else {
batch.setIndexBuffer(gpu::UINT32, (_drawMesh->getIndexBuffer()._buffer), 0);
batch.setIndexBuffer(gpu::UINT32, (_drawMesh->getIndexBuffer()._buffer), 0);
batch.setInputFormat((_drawMesh->getVertexFormat()));
batch.setInputFormat((_drawMesh->getVertexFormat()));
batch.setInputStream(0, _drawMesh->getVertexStream());
batch.setInputBuffer(0, model->_blendedVertexBuffers[meshIndex], 0, sizeof(glm::vec3));
batch.setInputBuffer(1, model->_blendedVertexBuffers[meshIndex], _drawMesh->getNumVertices() * sizeof(glm::vec3), sizeof(glm::vec3));
batch.setInputStream(2, _drawMesh->getVertexStream().makeRangedStream(2));
}
// TODO: Get rid of that extra call
if (!_hasColorAttrib) {
batch._glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
@ -215,31 +188,208 @@ void MeshPartPayload::bindMaterial(gpu::Batch& batch, const ModelRender::Locatio
}
void MeshPartPayload::bindTransform(gpu::Batch& batch, const ModelRender::Locations* locations) const {
// Still relying on the raw data from the model
const Model::MeshState& state = model->_meshStates.at(meshIndex);
Transform transform;
if (state.clusterBuffer) {
if (model->_cauterizeBones) {
batch.setUniformBuffer(ModelRender::SKINNING_GPU_SLOT, state.cauterizedClusterBuffer);
} else {
batch.setUniformBuffer(ModelRender::SKINNING_GPU_SLOT, state.clusterBuffer);
}
} else {
if (model->_cauterizeBones) {
transform = Transform(state.cauterizedClusterMatrices[0]);
} else {
transform = Transform(state.clusterMatrices[0]);
}
}
transform.preTranslate(_modelPosition);
batch.setModelTransform(transform);
batch.setModelTransform(_drawTransform);
}
void MeshPartPayload::render(RenderArgs* args) const {
PerformanceTimer perfTimer("MeshPartPayload::render");
if (!model->_readyWhenAdded || !model->_isVisible) {
gpu::Batch& batch = *(args->_batch);
auto mode = args->_renderMode;
auto alphaThreshold = args->_alphaThreshold; //translucent ? TRANSPARENT_ALPHA_THRESHOLD : OPAQUE_ALPHA_THRESHOLD; // FIX ME
model::MaterialKey drawMaterialKey;
if (_drawMaterial) {
drawMaterialKey = _drawMaterial->getKey();
}
bool translucentMesh = drawMaterialKey.isTransparent() || drawMaterialKey.isTransparentMap();
bool hasTangents = drawMaterialKey.isNormalMap();
bool hasSpecular = drawMaterialKey.isGlossMap();
bool hasLightmap = drawMaterialKey.isLightmapMap();
bool isSkinned = false;
bool wireframe = false;
if (wireframe) {
translucentMesh = hasTangents = hasSpecular = hasLightmap = isSkinned = false;
}
ModelRender::Locations* locations = nullptr;
ModelRender::pickPrograms(batch, mode, translucentMesh, alphaThreshold, hasLightmap, hasTangents, hasSpecular, isSkinned, wireframe,
args, locations);
// Bind the model transform and the skinClusterMatrices if needed
bindTransform(batch, locations);
// Bind the index buffer and vertex buffer and blend shapes if needed
bindMesh(batch);
// apply material properties
bindMaterial(batch, locations);
// TODO: We should be able to do that just in the renderTransparentJob
if (translucentMesh && locations->lightBufferUnit >= 0) {
PerformanceTimer perfTimer("DLE->setupTransparent()");
DependencyManager::get<DeferredLightingEffect>()->setupTransparent(args, locations->lightBufferUnit);
}
if (args) {
args->_details._materialSwitches++;
}
// Draw!
{
PerformanceTimer perfTimer("batch.drawIndexed()");
drawCall(batch);
}
if (args) {
const int INDICES_PER_TRIANGLE = 3;
args->_details._trianglesRendered += _drawPart._numIndices / INDICES_PER_TRIANGLE;
}
}
namespace render {
template <> const ItemKey payloadGetKey(const ModelMeshPartPayload::Pointer& payload) {
if (payload) {
return payload->getKey();
}
// Return opaque for lack of a better idea
return ItemKey::Builder::opaqueShape();
}
template <> const Item::Bound payloadGetBound(const ModelMeshPartPayload::Pointer& payload) {
if (payload) {
return payload->getBound();
}
return render::Item::Bound();
}
template <> void payloadRender(const ModelMeshPartPayload::Pointer& payload, RenderArgs* args) {
return payload->render(args);
}
}
using namespace render;
ModelMeshPartPayload::ModelMeshPartPayload(Model* model, int _meshIndex, int partIndex, int shapeIndex, const Transform& transform, const Transform& offsetTransform) :
_model(model),
_meshIndex(_meshIndex),
_shapeID(shapeIndex) {
auto& modelMesh = _model->_geometry->getMeshes().at(_meshIndex)->_mesh;
updateMeshPart(modelMesh, partIndex);
updateTransform(transform, offsetTransform);
initCache();
}
void ModelMeshPartPayload::initCache() {
if (_drawMesh) {
auto vertexFormat = _drawMesh->getVertexFormat();
_hasColorAttrib = vertexFormat->hasAttribute(gpu::Stream::COLOR);
_isSkinned = vertexFormat->hasAttribute(gpu::Stream::SKIN_CLUSTER_WEIGHT) && vertexFormat->hasAttribute(gpu::Stream::SKIN_CLUSTER_INDEX);
const FBXGeometry& geometry = _model->_geometry->getFBXGeometry();
const FBXMesh& mesh = geometry.meshes.at(_meshIndex);
_isBlendShaped = !mesh.blendshapes.isEmpty();
}
auto networkMaterial = _model->_geometry->getShapeMaterial(_shapeID);
if (networkMaterial) {
_drawMaterial = networkMaterial->_material;
};
}
void ModelMeshPartPayload::notifyLocationChanged() {
_model->_needsUpdateClusterMatrices = true;
}
render::ItemKey ModelMeshPartPayload::getKey() const {
ItemKey::Builder builder;
builder.withTypeShape();
if (!_model->isVisible()) {
builder.withInvisible();
}
if (_isBlendShaped || _isSkinned) {
builder.withDeformed();
}
if (_drawMaterial) {
auto matKey = _drawMaterial->getKey();
if (matKey.isTransparent() || matKey.isTransparentMap()) {
builder.withTransparent();
}
}
return builder.build();
}
render::Item::Bound ModelMeshPartPayload::getBound() const {
// NOTE: we can't cache this bound because we need to handle the case of a moving
// entity or mesh part.
return _model->getPartBounds(_meshIndex, _partIndex, _transform.getTranslation(), _transform.getRotation());
}
void ModelMeshPartPayload::bindMesh(gpu::Batch& batch) const {
if (!_isBlendShaped) {
batch.setIndexBuffer(gpu::UINT32, (_drawMesh->getIndexBuffer()._buffer), 0);
batch.setInputFormat((_drawMesh->getVertexFormat()));
batch.setInputStream(0, _drawMesh->getVertexStream());
} else {
batch.setIndexBuffer(gpu::UINT32, (_drawMesh->getIndexBuffer()._buffer), 0);
batch.setInputFormat((_drawMesh->getVertexFormat()));
batch.setInputBuffer(0, _model->_blendedVertexBuffers[_meshIndex], 0, sizeof(glm::vec3));
batch.setInputBuffer(1, _model->_blendedVertexBuffers[_meshIndex], _drawMesh->getNumVertices() * sizeof(glm::vec3), sizeof(glm::vec3));
batch.setInputStream(2, _drawMesh->getVertexStream().makeRangedStream(2));
}
// TODO: Get rid of that extra call
if (!_hasColorAttrib) {
batch._glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
}
}
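The offsets passed to setInputBuffer in the blend-shaped path assume a packed layout in _blendedVertexBuffers: all blended positions first, then all blended normals, both as tight vec3 arrays, with the remaining attributes still coming from the original mesh stream. A rough sketch of that arithmetic (the vertex count is a placeholder value):
// assumed layout of a blended vertex buffer: [ positions... | normals... ]
const size_t numVertices = 1024;                               // e.g. _drawMesh->getNumVertices()
const size_t positionsOffset = 0;                              // bound to input slot 0
const size_t normalsOffset = numVertices * sizeof(glm::vec3);  // bound to input slot 1
const size_t stride = sizeof(glm::vec3);                       // both streams are tightly packed
// texcoords and the rest stay on the original vertex stream, starting at slot 2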
void ModelMeshPartPayload::bindTransform(gpu::Batch& batch, const ModelRender::Locations* locations) const {
// Still relying on the raw data from the model
const Model::MeshState& state = _model->_meshStates.at(_meshIndex);
Transform transform;
if (state.clusterBuffer) {
if (_model->_cauterizeBones) {
batch.setUniformBuffer(ModelRender::SKINNING_GPU_SLOT, state.cauterizedClusterBuffer);
} else {
batch.setUniformBuffer(ModelRender::SKINNING_GPU_SLOT, state.clusterBuffer);
}
} else {
if (_model->_cauterizeBones) {
transform = Transform(state.cauterizedClusterMatrices[0]);
} else {
transform = Transform(state.clusterMatrices[0]);
}
}
// transform.preTranslate(_modelPosition);
transform.preTranslate(_transform.getTranslation());
batch.setModelTransform(transform);
}
void ModelMeshPartPayload::render(RenderArgs* args) const {
PerformanceTimer perfTimer("ModelMeshPartPayload::render");
if (!_model->_readyWhenAdded || !_model->_isVisible) {
return; // bail asap
}
@ -248,25 +398,25 @@ void MeshPartPayload::render(RenderArgs* args) const {
auto alphaThreshold = args->_alphaThreshold; //translucent ? TRANSPARENT_ALPHA_THRESHOLD : OPAQUE_ALPHA_THRESHOLD; // FIX ME
const FBXGeometry& geometry = model->_geometry->getFBXGeometry();
const std::vector<std::unique_ptr<NetworkMesh>>& networkMeshes = model->_geometry->getMeshes();
const FBXGeometry& geometry = _model->_geometry->getFBXGeometry();
const std::vector<std::unique_ptr<NetworkMesh>>& networkMeshes = _model->_geometry->getMeshes();
// guard against partially loaded meshes
if (meshIndex >= (int)networkMeshes.size() || meshIndex >= (int)geometry.meshes.size() || meshIndex >= (int)model->_meshStates.size() ) {
if (_meshIndex >= (int)networkMeshes.size() || _meshIndex >= (int)geometry.meshes.size() || _meshIndex >= (int)_model->_meshStates.size() ) {
return;
}
// Back to model to update the cluster matrices right now
model->updateClusterMatrices(_modelPosition, _modelOrientation);
_model->updateClusterMatrices(_transform.getTranslation(), _transform.getRotation());
const FBXMesh& mesh = geometry.meshes.at(meshIndex);
const FBXMesh& mesh = geometry.meshes.at(_meshIndex);
// if our index is ever out of range for either meshes or networkMeshes, then skip it, and set our _meshGroupsKnown
// to false to rebuild our mesh groups.
if (meshIndex < 0 || meshIndex >= (int)networkMeshes.size() || meshIndex > geometry.meshes.size()) {
model->_meshGroupsKnown = false; // regenerate these lists next time around.
model->_readyWhenAdded = false; // in case any of our users are using scenes
model->invalidCalculatedMeshBoxes(); // if we have to reload, we need to assume our mesh boxes are all invalid
if (_meshIndex < 0 || _meshIndex >= (int)networkMeshes.size() || _meshIndex >= (int)geometry.meshes.size()) {
_model->_meshGroupsKnown = false; // regenerate these lists next time around.
_model->_readyWhenAdded = false; // in case any of our users are using scenes
_model->invalidCalculatedMeshBoxes(); // if we have to reload, we need to assume our mesh boxes are all invalid
return; // FIXME!
}
@ -276,13 +426,7 @@ void MeshPartPayload::render(RenderArgs* args) const {
// sanity check
return; // FIXME!
}
// guard against partially loaded meshes
if (partIndex >= mesh.parts.size()) {
return;
}
model::MaterialKey drawMaterialKey;
if (_drawMaterial) {
drawMaterialKey = _drawMaterial->getKey();
@ -293,12 +437,12 @@ void MeshPartPayload::render(RenderArgs* args) const {
bool hasSpecular = drawMaterialKey.isGlossMap();
bool hasLightmap = drawMaterialKey.isLightmapMap();
bool isSkinned = _isSkinned;
bool wireframe = model->isWireframe();
bool wireframe = _model->isWireframe();
// render the part bounding box
#ifdef DEBUG_BOUNDING_PARTS
{
AABox partBounds = getPartBounds(meshIndex, partIndex);
AABox partBounds = getPartBounds(_meshIndex, partIndex);
bool inView = args->_viewFrustum->boxInFrustum(partBounds) != ViewFrustum::OUTSIDE;
glm::vec4 cubeColor;

View file

@ -1,5 +1,5 @@
//
// MeshPartPayload.h
// ModelMeshPartPayload.h
// interface/src/renderer
//
// Created by Sam Gateau on 10/3/15.
@ -24,41 +24,44 @@ class Model;
class MeshPartPayload {
public:
MeshPartPayload(Model* model, int meshIndex, int partIndex, int shapeIndex, glm::vec3 position, glm::quat orientation);
MeshPartPayload() {}
MeshPartPayload(model::MeshPointer mesh, int partIndex, model::MaterialPointer material, const Transform& transform, const Transform& offsetTransform);
typedef render::Payload<MeshPartPayload> Payload;
typedef Payload::DataPointer Pointer;
Model* model;
int meshIndex;
int partIndex;
int _shapeID;
glm::vec3 _modelPosition;
glm::quat _modelOrientation;
virtual void updateMeshPart(model::MeshPointer drawMesh, int partIndex);
void updateModelLocation(glm::vec3 position, glm::quat orientation);
virtual void notifyLocationChanged() {}
virtual void updateTransform(const Transform& transform, const Transform& offsetTransform);
virtual void updateMaterial(model::MaterialPointer drawMaterial);
// Render Item interface
render::ItemKey getKey() const;
render::Item::Bound getBound() const;
void render(RenderArgs* args) const;
// MeshPartPayload functions to perform render
virtual render::ItemKey getKey() const;
virtual render::Item::Bound getBound() const;
virtual void render(RenderArgs* args) const;
// ModelMeshPartPayload functions to perform render
void drawCall(gpu::Batch& batch) const;
void bindMesh(gpu::Batch& batch) const;
void bindMaterial(gpu::Batch& batch, const ModelRender::Locations* locations) const;
void bindTransform(gpu::Batch& batch, const ModelRender::Locations* locations) const;
void initCache();
virtual void bindMesh(gpu::Batch& batch) const;
virtual void bindMaterial(gpu::Batch& batch, const ModelRender::Locations* locations) const;
virtual void bindTransform(gpu::Batch& batch, const ModelRender::Locations* locations) const;
// Payload resource cached values
model::MeshPointer _drawMesh;
int _partIndex = 0;
model::Mesh::Part _drawPart;
model::MaterialPointer _drawMaterial;
model::Box _localBound;
Transform _drawTransform;
Transform _transform;
Transform _offsetTransform;
mutable model::Box _worldBound;
bool _hasColorAttrib = false;
bool _isSkinned = false;
bool _isBlendShaped = false;
};
namespace render {
@ -67,4 +70,32 @@ namespace render {
template <> void payloadRender(const MeshPartPayload::Pointer& payload, RenderArgs* args);
}
class ModelMeshPartPayload : public MeshPartPayload {
public:
ModelMeshPartPayload(Model* model, int meshIndex, int partIndex, int shapeIndex, const Transform& transform, const Transform& offsetTransform);
typedef render::Payload<ModelMeshPartPayload> Payload;
typedef Payload::DataPointer Pointer;
void notifyLocationChanged() override;
// Render Item interface
render::ItemKey getKey() const override;
render::Item::Bound getBound() const override;
void render(RenderArgs* args) const override;
// ModelMeshPartPayload functions to perform render
void bindMesh(gpu::Batch& batch) const override;
void bindTransform(gpu::Batch& batch, const ModelRender::Locations* locations) const override;
void initCache();
Model* _model;
int _meshIndex;
int _shapeID;
bool _isSkinned = false;
bool _isBlendShaped = false;
};
#endif // hifi_MeshPartPayload_h

View file

@ -90,7 +90,7 @@ void Model::setScale(const glm::vec3& scale) {
_scaledToFit = false;
}
const float METERS_PER_MILLIMETER = 0.01f;
void Model::setScaleInternal(const glm::vec3& scale) {
if (glm::distance(_scale, scale) > METERS_PER_MILLIMETER) {
@ -110,11 +110,19 @@ void Model::setOffset(const glm::vec3& offset) {
void Model::enqueueLocationChange() {
render::ScenePointer scene = AbstractViewStateInterface::instance()->getMain3DScene();
Transform transform;
transform.setTranslation(_translation);
transform.setRotation(_rotation);
Transform offset;
offset.setScale(_scale);
offset.postTranslate(_offset);
render::PendingChanges pendingChanges;
foreach (auto itemID, _renderItems.keys()) {
pendingChanges.updateItem<MeshPartPayload>(itemID, [=](MeshPartPayload& data) {
data.updateModelLocation(_translation, _rotation);
data.model->_needsUpdateClusterMatrices = true;
pendingChanges.updateItem<MeshPartPayload>(itemID, [transform, offset](MeshPartPayload& data) {
data.updateTransform(transform, offset);
data.notifyLocationChanged();
});
}
@ -495,11 +503,10 @@ bool Model::addToScene(std::shared_ptr<render::Scene> scene, render::PendingChan
foreach (auto renderItem, _renderItemsSet) {
auto item = scene->allocateID();
auto renderData = MeshPartPayload::Pointer(renderItem);
auto renderPayload = std::make_shared<MeshPartPayload::Payload>(renderData);
auto renderPayload = std::make_shared<MeshPartPayload::Payload>(renderItem);
pendingChanges.resetItem(item, renderPayload);
pendingChanges.updateItem<MeshPartPayload>(item, [&](MeshPartPayload& data) {
data.model->_needsUpdateClusterMatrices = true;
pendingChanges.updateItem<MeshPartPayload>(item, [](MeshPartPayload& data) {
data.notifyLocationChanged();
});
_renderItems.insert(item, renderPayload);
somethingAdded = true;
@ -523,12 +530,11 @@ bool Model::addToScene(std::shared_ptr<render::Scene> scene,
foreach (auto renderItem, _renderItemsSet) {
auto item = scene->allocateID();
auto renderData = MeshPartPayload::Pointer(renderItem);
auto renderPayload = std::make_shared<MeshPartPayload::Payload>(renderData);
auto renderPayload = std::make_shared<MeshPartPayload::Payload>(renderItem);
renderPayload->addStatusGetters(statusGetters);
pendingChanges.resetItem(item, renderPayload);
pendingChanges.updateItem<MeshPartPayload>(item, [&](MeshPartPayload& data) {
data.model->_needsUpdateClusterMatrices = true;
pendingChanges.updateItem<MeshPartPayload>(item, [](MeshPartPayload& data) {
data.notifyLocationChanged();
});
_renderItems.insert(item, renderPayload);
somethingAdded = true;
@ -1127,8 +1133,14 @@ AABox Model::getPartBounds(int meshIndex, int partIndex, glm::vec3 modelPosition
void Model::segregateMeshGroups() {
QSharedPointer<NetworkGeometry> networkGeometry;
if (_showCollisionHull && _collisionGeometry && _collisionGeometry->isLoaded()) {
networkGeometry = _collisionGeometry;
bool showingCollisionHull = false;
if (_showCollisionHull && _collisionGeometry) {
if (_collisionGeometry->isLoaded()) {
networkGeometry = _collisionGeometry;
showingCollisionHull = true;
} else {
return;
}
} else {
networkGeometry = _geometry;
}
@ -1136,8 +1148,10 @@ void Model::segregateMeshGroups() {
const std::vector<std::unique_ptr<NetworkMesh>>& networkMeshes = networkGeometry->getMeshes();
// all of our mesh vectors must match in size
if ((int)networkMeshes.size() != geometry.meshes.size() ||
geometry.meshes.size() != _meshStates.size()) {
auto geoMeshesSize = geometry.meshes.size();
if ((int)networkMeshes.size() != geoMeshesSize ||
// geometry.meshes.size() != _meshStates.size()) {
geoMeshesSize > _meshStates.size()) {
qDebug() << "WARNING!!!! Mesh Sizes don't match! We will not segregate mesh groups yet.";
return;
}
@ -1147,15 +1161,30 @@ void Model::segregateMeshGroups() {
_renderItemsSet.clear();
Transform transform;
transform.setTranslation(_translation);
transform.setRotation(_rotation);
Transform offset;
offset.setScale(_scale);
offset.postTranslate(_offset);
// Run through all of the meshes, and place them into their segregated, but unsorted buckets
int shapeID = 0;
for (int i = 0; i < (int)networkMeshes.size(); i++) {
const FBXMesh& mesh = geometry.meshes.at(i);
const NetworkMesh& networkMesh = *(networkMeshes.at(i).get());
// Create the render payloads
int totalParts = mesh.parts.size();
for (int partIndex = 0; partIndex < totalParts; partIndex++) {
_renderItemsSet << std::make_shared<MeshPartPayload>(this, i, partIndex, shapeID, _translation, _rotation);
if (showingCollisionHull) {
_renderItemsSet << std::make_shared<MeshPartPayload>(networkMesh._mesh, partIndex, ModelRender::getCollisionHullMaterial(), transform, offset);
} else {
_renderItemsSet << std::make_shared<ModelMeshPartPayload>(this, i, partIndex, shapeID, transform, offset);
}
shapeID++;
}
}
@ -1168,15 +1197,22 @@ bool Model::initWhenReady(render::ScenePointer scene) {
render::PendingChanges pendingChanges;
Transform transform;
transform.setTranslation(_translation);
transform.setRotation(_rotation);
Transform offset;
offset.setScale(_scale);
offset.postTranslate(_offset);
foreach (auto renderItem, _renderItemsSet) {
auto item = scene->allocateID();
auto renderData = MeshPartPayload::Pointer(renderItem);
auto renderPayload = std::make_shared<MeshPartPayload::Payload>(renderData);
auto renderPayload = std::make_shared<MeshPartPayload::Payload>(renderItem);
_renderItems.insert(item, renderPayload);
pendingChanges.resetItem(item, renderPayload);
pendingChanges.updateItem<MeshPartPayload>(item, [&](MeshPartPayload& data) {
data.updateModelLocation(_translation, _rotation);
data.model->_needsUpdateClusterMatrices = true;
pendingChanges.updateItem<MeshPartPayload>(item, [transform,offset](MeshPartPayload& data) {
data.updateTransform(transform, offset);
data.notifyLocationChanged();
});
}
scene->enqueuePendingChanges(pendingChanges);

View file

@ -366,7 +366,7 @@ private:
bool _needsUpdateClusterMatrices = true;
bool _showCollisionHull = false;
friend class MeshPartPayload;
friend class ModelMeshPartPayload;
protected:
RigPointer _rig;
};

View file

@ -280,3 +280,16 @@ void ModelRender::pickPrograms(gpu::Batch& batch, RenderArgs::RenderMode mode, b
DependencyManager::get<TextureCache>()->getNormalFittingTexture());
}
}
model::MaterialPointer ModelRender::_collisionHullMaterial;
model::MaterialPointer ModelRender::getCollisionHullMaterial() {
if (!_collisionHullMaterial) {
_collisionHullMaterial = std::make_shared<model::Material>();
_collisionHullMaterial->setDiffuse(glm::vec3(1.0f, 0.5f, 0.0f));
_collisionHullMaterial->setMetallic(0.02f);
_collisionHullMaterial->setGloss(1.0f);
}
return _collisionHullMaterial;
}
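A hypothetical usage, mirroring the segregateMeshGroups() change above: when the collision hull is shown, each mesh part is wrapped in a plain MeshPartPayload that draws with this shared, lazily-created debug material instead of the mesh's own network material.
// networkMesh, partIndex, transform and offset come from the surrounding loop in segregateMeshGroups()
auto payload = std::make_shared<MeshPartPayload>(
    networkMesh._mesh, partIndex, ModelRender::getCollisionHullMaterial(), transform, offset);
_renderItemsSet << payload;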

View file

@ -149,6 +149,11 @@ public:
static const RenderPipelineLib& getRenderPipelineLib();
// Collision hull Material
static model::MaterialPointer _collisionHullMaterial;
static model::MaterialPointer getCollisionHullMaterial();
};
#endif // hifi_ModelRender_h

View file

@ -4,3 +4,4 @@ set(TARGET_NAME shared)
setup_hifi_library(Gui Network Script Widgets)
target_zlib()
target_nsight()

View file

@ -12,6 +12,7 @@
#include "AABox.h"
#include "AACube.h"
#include "Transform.h"
#include "Extents.h"
#include "GeometryUtil.h"
#include "NumericalConstants.h"
@ -42,50 +43,6 @@ glm::vec3 AABox::calcCenter() const {
return center;
}
void AABox::rotate(const glm::quat& rotation) {
auto minimum = _corner;
auto maximum = _corner + _scale;
glm::vec3 bottomLeftNear(minimum.x, minimum.y, minimum.z);
glm::vec3 bottomRightNear(maximum.x, minimum.y, minimum.z);
glm::vec3 bottomLeftFar(minimum.x, minimum.y, maximum.z);
glm::vec3 bottomRightFar(maximum.x, minimum.y, maximum.z);
glm::vec3 topLeftNear(minimum.x, maximum.y, minimum.z);
glm::vec3 topRightNear(maximum.x, maximum.y, minimum.z);
glm::vec3 topLeftFar(minimum.x, maximum.y, maximum.z);
glm::vec3 topRightFar(maximum.x, maximum.y, maximum.z);
glm::vec3 bottomLeftNearRotated = rotation * bottomLeftNear;
glm::vec3 bottomRightNearRotated = rotation * bottomRightNear;
glm::vec3 bottomLeftFarRotated = rotation * bottomLeftFar;
glm::vec3 bottomRightFarRotated = rotation * bottomRightFar;
glm::vec3 topLeftNearRotated = rotation * topLeftNear;
glm::vec3 topRightNearRotated = rotation * topRightNear;
glm::vec3 topLeftFarRotated = rotation * topLeftFar;
glm::vec3 topRightFarRotated = rotation * topRightFar;
minimum = glm::min(bottomLeftNearRotated,
glm::min(bottomRightNearRotated,
glm::min(bottomLeftFarRotated,
glm::min(bottomRightFarRotated,
glm::min(topLeftNearRotated,
glm::min(topRightNearRotated,
glm::min(topLeftFarRotated,
topRightFarRotated)))))));
maximum = glm::max(bottomLeftNearRotated,
glm::max(bottomRightNearRotated,
glm::max(bottomLeftFarRotated,
glm::max(bottomRightFarRotated,
glm::max(topLeftNearRotated,
glm::max(topRightNearRotated,
glm::max(topLeftFarRotated,
topRightFarRotated)))))));
_corner = minimum;
_scale = maximum - minimum;
}
glm::vec3 AABox::getVertex(BoxVertex vertex) const {
switch (vertex) {
case BOTTOM_LEFT_NEAR:
@ -525,3 +482,59 @@ AABox& AABox::operator += (const AABox& box) {
}
return (*this);
}
void AABox::scale(const glm::vec3& scale) {
_corner *= scale;
_scale *= scale;
}
void AABox::rotate(const glm::quat& rotation) {
auto minimum = _corner;
auto maximum = _corner + _scale;
glm::vec3 bottomLeftNear(minimum.x, minimum.y, minimum.z);
glm::vec3 bottomRightNear(maximum.x, minimum.y, minimum.z);
glm::vec3 bottomLeftFar(minimum.x, minimum.y, maximum.z);
glm::vec3 bottomRightFar(maximum.x, minimum.y, maximum.z);
glm::vec3 topLeftNear(minimum.x, maximum.y, minimum.z);
glm::vec3 topRightNear(maximum.x, maximum.y, minimum.z);
glm::vec3 topLeftFar(minimum.x, maximum.y, maximum.z);
glm::vec3 topRightFar(maximum.x, maximum.y, maximum.z);
glm::vec3 bottomLeftNearRotated = rotation * bottomLeftNear;
glm::vec3 bottomRightNearRotated = rotation * bottomRightNear;
glm::vec3 bottomLeftFarRotated = rotation * bottomLeftFar;
glm::vec3 bottomRightFarRotated = rotation * bottomRightFar;
glm::vec3 topLeftNearRotated = rotation * topLeftNear;
glm::vec3 topRightNearRotated = rotation * topRightNear;
glm::vec3 topLeftFarRotated = rotation * topLeftFar;
glm::vec3 topRightFarRotated = rotation * topRightFar;
minimum = glm::min(bottomLeftNearRotated,
glm::min(bottomRightNearRotated,
glm::min(bottomLeftFarRotated,
glm::min(bottomRightFarRotated,
glm::min(topLeftNearRotated,
glm::min(topRightNearRotated,
glm::min(topLeftFarRotated,
topRightFarRotated)))))));
maximum = glm::max(bottomLeftNearRotated,
glm::max(bottomRightNearRotated,
glm::max(bottomLeftFarRotated,
glm::max(bottomRightFarRotated,
glm::max(topLeftNearRotated,
glm::max(topRightNearRotated,
glm::max(topLeftFarRotated,
topRightFarRotated)))))));
_corner = minimum;
_scale = maximum - minimum;
}
void AABox::transform(const Transform& transform) {
scale(transform.getScale());
rotate(transform.getRotation());
translate(transform.getTranslation());
}
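A minimal usage sketch of the new helpers (the corner-and-dimensions constructor is assumed from the existing AABox API): transform() composes the three operations in the same order as above, scale first, then rotate, then translate, so each step happens in the box's local frame before the translation is applied.
AABox box(glm::vec3(0.0f), glm::vec3(1.0f));            // unit box at the origin (assumed ctor)
Transform t;
t.setScale(glm::vec3(2.0f));
t.setRotation(glm::angleAxis(glm::half_pi<float>(), glm::vec3(0.0f, 1.0f, 0.0f)));
t.setTranslation(glm::vec3(10.0f, 0.0f, 0.0f));
box.transform(t);                                       // same as box.scale(...); box.rotate(...); box.translate(...);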

View file

@ -24,6 +24,7 @@
class AACube;
class Extents;
class Transform;
class AABox {
@ -40,12 +41,7 @@ public:
void setBox(const glm::vec3& corner, float scale);
glm::vec3 getVertexP(const glm::vec3& normal) const;
glm::vec3 getVertexN(const glm::vec3& normal) const;
void shiftBy(const glm::vec3& delta) { _corner += delta; }
void rotate(const glm::quat& rotation);
void scale(float scale) { _corner *= scale; _scale *= scale; }
void scale(const glm::vec3& scale) { _corner *= scale; _scale *= scale; }
const glm::vec3& getCorner() const { return _corner; }
const glm::vec3& getScale() const { return _scale; }
const glm::vec3& getDimensions() const { return _scale; }
@ -85,6 +81,20 @@ public:
AABox& operator += (const glm::vec3& point);
AABox& operator += (const AABox& box);
// Translate the AABox by just moving the corner
void translate(const glm::vec3& translation) { _corner += translation; }
// Rotate the AABox around its frame origin
// meaning rotating the corners of the AABox around the point {0,0,0} and re-evaluating the min/max
void rotate(const glm::quat& rotation);
/// Scale the AABox
void scale(float scale);
void scale(const glm::vec3& scale);
// Transform the AABox with the given transform (scale, then rotate, then translate)
void transform(const Transform& transform);
bool isInvalid() const { return _corner == glm::vec3(std::numeric_limits<float>::infinity()); }
private:

View file

@ -0,0 +1,22 @@
//
// Created by Bradley Austin Davis on 2015/12/10
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "NsightHelpers.h"
#if defined(NSIGHT_FOUND)
#include "nvToolsExt.h"
ProfileRange::ProfileRange(const char *name) {
nvtxRangePush(name);
}
ProfileRange::~ProfileRange() {
nvtxRangePop();
}
#endif

View file

@ -0,0 +1,24 @@
//
// Created by Bradley Austin Davis on 2015/12/10
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#pragma once
#ifndef hifi_gl_NsightHelpers_h
#define hifi_gl_NsightHelpers_h
#if defined(NSIGHT_FOUND)
class ProfileRange {
public:
ProfileRange(const char *name);
~ProfileRange();
};
#define PROFILE_RANGE(name) ProfileRange profileRangeThis(name);
#else
#define PROFILE_RANGE(name)
#endif
#endif
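A minimal usage sketch, assuming the header is included from code built with NSIGHT_FOUND: the macro declares a scoped ProfileRange, so an nvtx range is pushed on entry and popped automatically when the enclosing scope exits; without Nsight the macro compiles to nothing.
#include "NsightHelpers.h"

void renderFrame() {
    PROFILE_RANGE(__FUNCTION__);      // pushes "renderFrame" for this scope
    {
        PROFILE_RANGE("drawOpaque");  // nested ranges are fine; popped in reverse order
        // ... record and submit opaque batches ...
    }
    // the "renderFrame" range pops here, when the function returns
}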