Merge branch 'master' of https://github.com/highfidelity/hifi into red

samcake 2016-02-09 09:24:55 -08:00
commit b1b08bf2f7
69 changed files with 38799 additions and 465 deletions


@ -11,10 +11,20 @@ macro(SETUP_HIFI_LIBRARY)
project(${TARGET_NAME})
# grab the implemenation and header files
# grab the implementation and header files
file(GLOB_RECURSE LIB_SRCS "src/*.h" "src/*.cpp" "src/*.c")
list(APPEND ${TARGET_NAME}_SRCS ${LIB_SRCS})
# add compiler flags to AVX source files
file(GLOB_RECURSE AVX_SRCS "src/avx/*.cpp" "src/avx/*.c")
foreach(SRC ${AVX_SRCS})
if (WIN32)
set_source_files_properties(${SRC} PROPERTIES COMPILE_FLAGS /arch:AVX)
elseif (APPLE OR UNIX)
set_source_files_properties(${SRC} PROPERTIES COMPILE_FLAGS -mavx)
endif()
endforeach()
setup_memory_debugger()
# create a library and set the property so it can be referenced later


@ -0,0 +1,218 @@
//
// ACAudioSearchAndInject.js
// audio
//
// Created by Eric Levin 2/1/2016
// Copyright 2016 High Fidelity, Inc.
// This AC script searches for special sound entities near avatars and plays those sounds based on information specified in the entity's
// userData field (see acAudioSearchCompatibleEntitySpawner.js for an example)
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
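//
// A sound entity's userData is expected to look like the following (a minimal sketch with illustrative
// values; the URL is hypothetical - see acAudioSearchCompatibleEntitySpawner.js below for a working example):
//
// userData: JSON.stringify({
//     soundKey: {
//         url: "http://example.com/sounds/ambience.wav", // hypothetical sound URL
//         volume: 0.3,              // defaults to 0.5 when omitted
//         loop: false,              // defaults to false
//         playbackGap: 2000,        // ms between plays of a non-looping sound (default 1000)
//         playbackGapRange: 500     // ms of random variation applied to playbackGap (default 0)
//     }
// })
//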
Script.include("https://rawgit.com/highfidelity/hifi/master/examples/libraries/utils.js");
var SOUND_DATA_KEY = "soundKey";
var QUERY_RADIUS = 50;
EntityViewer.setKeyholeRadius(QUERY_RADIUS);
Entities.setPacketsPerSecond(6000);
Agent.isAvatar = true;
var DEFAULT_SOUND_DATA = {
volume: 0.5,
loop: false,
playbackGap: 1000, // in ms
playbackGapRange: 0 // in ms
};
var MIN_PLAYBACK_GAP = 0;
var UPDATE_TIME = 100;
var EXPIRATION_TIME = 5000;
var soundEntityMap = {};
var soundUrls = {};
var avatarPositions = [];
function update() {
var avatars = AvatarList.getAvatarIdentifiers();
for (var i = 0; i < avatars.length; i++) {
var avatar = AvatarList.getAvatar(avatars[i]);
var avatarPosition = avatar.position;
if (!avatarPosition) {
continue;
}
EntityViewer.setPosition(avatarPosition);
EntityViewer.queryOctree();
avatarPositions.push(avatarPosition);
}
Script.setTimeout(function() {
avatarPositions.forEach(function(avatarPosition) {
var entities = Entities.findEntities(avatarPosition, QUERY_RADIUS);
handleFoundSoundEntities(entities);
});
// Now wipe the list for the next query
avatarPositions = [];
}, UPDATE_TIME);
handleActiveSoundEntities();
}
function handleActiveSoundEntities() {
// Go through all our sound entities, if they have passed expiration time, remove them from map
for (var potentialSoundEntity in soundEntityMap) {
if (!soundEntityMap.hasOwnProperty(potentialSoundEntity)) {
// The current property is not a direct property of soundEntityMap so ignore it
continue;
}
var soundEntity = potentialSoundEntity;
var soundProperties = soundEntityMap[soundEntity];
soundProperties.timeWithoutAvatarInRange += UPDATE_TIME;
if (soundProperties.timeWithoutAvatarInRange > EXPIRATION_TIME && soundProperties.soundInjector) {
// An avatar hasn't been within range of this sound entity recently, so remove it from map
soundProperties.soundInjector.stop();
delete soundEntityMap[soundEntity];
} else if (soundProperties.isDownloaded) {
// If this sound hasn't expired yet, we want to potentially play it!
if (soundProperties.readyToPlay) {
var newPosition = Entities.getEntityProperties(soundEntity, "position").position;
if (!soundProperties.soundInjector) {
soundProperties.soundInjector = Audio.playSound(soundProperties.sound, {
volume: soundProperties.volume,
position: newPosition,
loop: soundProperties.loop
});
} else {
soundProperties.soundInjector.restart();
}
soundProperties.readyToPlay = false;
} else if (soundProperties.sound && soundProperties.loop === false) {
// We need to check all of our entities that are not looping but have an interval associated with them
// to see if it's time for them to play again
soundProperties.timeSinceLastPlay += UPDATE_TIME;
if (soundProperties.timeSinceLastPlay > soundProperties.clipDuration + soundProperties.currentPlaybackGap) {
soundProperties.readyToPlay = true;
soundProperties.timeSinceLastPlay = 0;
// Now let's get our new current interval
soundProperties.currentPlaybackGap = soundProperties.playbackGap + randFloat(-soundProperties.playbackGapRange, soundProperties.playbackGapRange);
soundProperties.currentPlaybackGap = Math.max(MIN_PLAYBACK_GAP, soundProperties.currentPlaybackGap);
}
}
}
}
}
function handleFoundSoundEntities(entities) {
entities.forEach(function(entity) {
var soundData = getEntityCustomData(SOUND_DATA_KEY, entity);
if (soundData && soundData.url) {
// check the sound entity map - if this entity isn't in it yet, add it
if (!soundEntityMap[entity]) {
var soundProperties = {
url: soundData.url,
volume: soundData.volume || DEFAULT_SOUND_DATA.volume,
loop: soundData.loop || DEFAULT_SOUND_DATA.loop,
playbackGap: soundData.playbackGap || DEFAULT_SOUND_DATA.playbackGap,
playbackGapRange: soundData.playbackGapRange || DEFAULT_SOUND_DATA.playbackGapRange,
readyToPlay: false,
position: Entities.getEntityProperties(entity, "position").position,
timeSinceLastPlay: 0,
timeWithoutAvatarInRange: 0,
isDownloaded: false
};
soundProperties.currentPlaybackGap = soundProperties.playbackGap + randFloat(-soundProperties.playbackGapRange, soundProperties.playbackGapRange);
soundProperties.currentPlaybackGap = Math.max(MIN_PLAYBACK_GAP, soundProperties.currentPlaybackGap);
soundEntityMap[entity] = soundProperties;
if (!soundUrls[soundData.url]) {
// We need to download sound before we add it to our map
var sound = SoundCache.getSound(soundData.url);
// Only add it to map once it's downloaded
soundUrls[soundData.url] = sound;
sound.ready.connect(function() {
soundProperties.sound = sound;
soundProperties.readyToPlay = true;
soundProperties.isDownloaded = true;
soundProperties.clipDuration = sound.duration * 1000;
soundEntityMap[entity] = soundProperties;
});
} else {
// We already have sound downloaded, so just add it to map right away
soundProperties.sound = soundUrls[soundData.url];
soundProperties.clipDuration = soundProperties.sound.duration * 1000;
soundProperties.readyToPlay = true;
soundProperties.isDownloaded = true;
soundEntityMap[entity] = soundProperties;
}
} else {
// If this sound is in our map already, we want to reset timeWithoutAvatarInRange.
// We also want to check whether the entity has been updated with new sound data - if so, we want to update!
soundEntityMap[entity].timeWithoutAvatarInRange = 0;
checkForSoundPropertyChanges(soundEntityMap[entity], soundData);
}
}
});
}
function checkForSoundPropertyChanges(currentProps, newProps) {
var needsNewInjector = false;
if (currentProps.playbackGap !== newProps.playbackGap && !currentProps.loop) {
// playbackGap only applies to non looping sounds
currentProps.playbackGap = newProps.playbackGap;
currentProps.currentPlaybackGap = currentProps.playbackGap + randFloat(-currentProps.playbackGapRange, currentProps.playbackGapRange);
currentProps.currentPlaybackGap = Math.max(MIN_PLAYBACK_GAP, currentProps.currentPlaybackGap);
currentProps.readyToPlay = true;
}
if (currentProps.playbackGapRange !== newProps.playbackGapRange) {
currentProps.playbackGapRange = newProps.playbackGapRange;
currentProps.currentPlaybackGap = currentProps.playbackGap + randFloat(-currentProps.playbackGapRange, currentProps.playbackGapRange);
currentProps.currentPlaybackGap = Math.max(MIN_PLAYBACK_GAP, currentProps.currentPlaybackGap);
currentProps.readyToPlay = true;
}
if (currentProps.volume !== newProps.volume) {
currentProps.volume = newProps.volume;
needsNewInjector = true;
}
if (currentProps.url !== newProps.url) {
currentProps.url = newProps.url;
currentProps.sound = null;
if (!soundUrls[currentProps.url]) {
var sound = SoundCache.getSound(currentProps.url);
currentProps.isDownloaded = false;
sound.ready.connect(function() {
currentProps.sound = sound;
currentProps.clipDuration = sound.duration * 1000;
currentProps.isDownloaded = true;
});
} else {
currentProps.sound = soundUrls[currentProps.url];
currentProps.clipDuration = currentProps.sound.duration * 1000;
}
needsNewInjector = true;
}
if (currentProps.loop !== newProps.loop) {
currentProps.loop = newProps.loop;
needsNewInjector = true;
}
if (needsNewInjector) {
// If we were looping we need to stop that so new changes are applied
currentProps.soundInjector.stop();
currentProps.soundInjector = null;
currentProps.readyToPlay = true;
}
}
Script.setInterval(update, UPDATE_TIME);


@ -0,0 +1,57 @@
//
// acAudioSearchCompatibleEntitySpawner.js
// audio/acAudioSearching
//
// Created by Eric Levin 2/2/2016
// Copyright 2016 High Fidelity, Inc.
// This is a client script which spawns entities with a field in userData compatible with the ACAudioSearchAndInject.js script.
// These entities specify data about the sound they want to play, such as url, volume, and whether to loop or not.
// The position of the entity determines the position from which the sound plays.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
Script.include("../../libraries/utils.js");
var orientation = Camera.getOrientation();
orientation = Quat.safeEulerAngles(orientation);
orientation.x = 0;
orientation = Quat.fromVec3Degrees(orientation);
var center = Vec3.sum(MyAvatar.position, Vec3.multiply(3, Quat.getFront(orientation)));
// http://hifi-public.s3.amazonaws.com/ryan/demo/0619_Fireplace__Tree_B.L.wav
var SOUND_DATA_KEY = "soundKey";
var userData = {
soundKey: {
url: "http://hifi-content.s3.amazonaws.com/DomainContent/Junkyard/Sounds/ClothSail/cloth_sail3.L.wav",
volume: 0.3,
loop: false,
playbackGap: 2000, // In ms - time to wait in between clip plays
playbackGapRange: 500 // In ms - the range to wait in between clip plays
}
}
var entityProps = {
type: "Box",
position: center,
color: {
red: 200,
green: 10,
blue: 200
},
dimensions: {
x: 0.1,
y: 0.1,
z: 0.1
},
userData: JSON.stringify(userData)
}
var soundEntity = Entities.addEntity(entityProps);
function cleanup() {
Entities.deleteEntity(soundEntity);
}
Script.scriptEnding.connect(cleanup);


@ -447,7 +447,7 @@
"interpType": "snapshotPrev",
"transitions": [
{ "var": "isAway", "state": "awayIntro" },
{ "var": "isNotInAir", "state": "idle" }
{ "var": "isNotInAir", "state": "landStandImpact" }
]
},
{
@ -457,7 +457,51 @@
"interpType": "snapshotPrev",
"transitions": [
{ "var": "isAway", "state": "awayIntro" },
{ "var": "isNotInAir", "state": "idle" }
{ "var": "isNotInAir", "state": "landRun" }
]
},
{
"id": "landStandImpact",
"interpTarget": 6,
"interpDuration": 4,
"transitions": [
{ "var": "isAway", "state": "awayIntro" },
{ "var": "isFlying", "state": "fly" },
{ "var": "isTakeoffStand", "state": "takeoffStand" },
{ "var": "isTakeoffRun", "state": "takeoffRun" },
{ "var": "landStandImpactOnDone", "state": "landStand" }
]
},
{
"id": "landStand",
"interpTarget": 0,
"interpDuration": 1,
"transitions": [
{ "var": "isMovingForward", "state": "idleToWalkFwd" },
{ "var": "isMovingBackward", "state": "walkBwd" },
{ "var": "isMovingRight", "state": "strafeRight" },
{ "var": "isMovingLeft", "state": "strafeLeft" },
{ "var": "isTurningRight", "state": "turnRight" },
{ "var": "isTurningLeft", "state": "turnLeft" },
{ "var": "isAway", "state": "awayIntro" },
{ "var": "isFlying", "state": "fly" },
{ "var": "isTakeoffStand", "state": "takeoffStand" },
{ "var": "isTakeoffRun", "state": "takeoffRun" },
{ "var": "isInAirStand", "state": "inAirStand" },
{ "var": "isInAirRun", "state": "inAirRun" },
{ "var": "landStandOnDone", "state": "idle" }
]
},
{
"id": "landRun",
"interpTarget": 1,
"interpDuration": 7,
"transitions": [
{ "var": "isAway", "state": "awayIntro" },
{ "var": "isFlying", "state": "fly" },
{ "var": "isTakeoffStand", "state": "takeoffStand" },
{ "var": "isTakeoffRun", "state": "takeoffRun" },
{ "var": "landRunOnDone", "state": "walkFwd" }
]
}
]
@ -873,6 +917,42 @@
"children": []
}
]
},
{
"id": "landStandImpact",
"type": "clip",
"data": {
"url": "https://hifi-content.s3.amazonaws.com/ozan/dev/anim/standard_anims_160127/jump_standing_land.fbx",
"startFrame": 1.0,
"endFrame": 6.0,
"timeScale": 1.0,
"loopFlag": false
},
"children": []
},
{
"id": "landStand",
"type": "clip",
"data": {
"url": "https://hifi-content.s3.amazonaws.com/ozan/dev/anim/standard_anims_160127/jump_standing_land.fbx",
"startFrame": 6.0,
"endFrame": 28.0,
"timeScale": 1.0,
"loopFlag": false
},
"children": []
},
{
"id": "landRun",
"type": "clip",
"data": {
"url": "https://hifi-content.s3.amazonaws.com/ozan/dev/anim/standard_anims_160127/jump_land.fbx",
"startFrame": 1.0,
"endFrame": 6.0,
"timeScale": 0.65,
"loopFlag": false
},
"children": []
}
]
}


@ -181,7 +181,11 @@ ModalWindow {
upButton.enabled = Qt.binding(function() { return (model.parentFolder && model.parentFolder != "") ? true : false; });
showFiles = !root.selectDirectory
}
onFolderChanged: fileTableView.currentRow = 0;
onFolderChanged: {
fileTableView.selection.clear();
fileTableView.selection.select(0);
fileTableView.currentRow = 0;
}
}
function navigateToRow(row) {
@ -199,6 +203,59 @@ ModalWindow {
okAction.trigger();
}
}
property string prefix: ""
function addToPrefix(event) {
if (!event.text || event.text === "") {
return false;
}
var newPrefix = prefix + event.text.toLowerCase();
var matchedIndex = -1;
for (var i = 0; i < model.count; ++i) {
var name = model.get(i, "fileName").toLowerCase();
if (0 === name.indexOf(newPrefix)) {
matchedIndex = i;
break;
}
}
if (matchedIndex !== -1) {
fileTableView.selection.clear();
fileTableView.selection.select(matchedIndex);
fileTableView.currentRow = matchedIndex;
fileTableView.prefix = newPrefix;
}
prefixClearTimer.restart();
return true;
}
Timer {
id: prefixClearTimer
interval: 1000
repeat: false
running: false
onTriggered: fileTableView.prefix = "";
}
Keys.onPressed: {
switch (event.key) {
case Qt.Key_Backspace:
case Qt.Key_Tab:
case Qt.Key_Backtab:
event.accepted = false;
break;
default:
if (addToPrefix(event)) {
event.accepted = true
} else {
event.accepted = false;
}
break;
}
}
}
TextField {


@ -3,6 +3,11 @@ import QtQuick.Controls 1.4
TableView {
id: root
onActiveFocusChanged: {
if (activeFocus && currentRow == -1) {
root.selection.select(0)
}
}
itemDelegate: Component {
Item {


@ -90,6 +90,7 @@ Window {
}
ScrollView {
onActiveFocusChanged: if (activeFocus && listView.currentItem) { listView.currentItem.forceActiveFocus(); }
anchors {
top: allButtons.bottom;
left: parent.left;
@ -103,54 +104,57 @@ Window {
id: listView
clip: true
anchors { fill: parent; margins: 0 }
model: runningScriptsModel
delegate: Rectangle {
id: rectangle
clip: true
radius: 3
delegate: FocusScope {
id: scope
anchors { left: parent.left; right: parent.right }
height: scriptName.height + 12 + (ListView.isCurrentItem ? scriptName.height + 6 : 0)
color: ListView.isCurrentItem ? "#39f" :
index % 2 ? "#ddd" : "#eee"
Text {
id: scriptName
anchors { left: parent.left; leftMargin: 4; top: parent.top; topMargin:6 }
text: name
}
Text {
id: scriptUrl
anchors { left: scriptName.left; right: parent.right; rightMargin: 4; top: scriptName.bottom; topMargin: 6 }
text: url
elide: Text.ElideMiddle
}
MouseArea {
Keys.onDownPressed: listView.incrementCurrentIndex()
Keys.onUpPressed: listView.decrementCurrentIndex()
Rectangle {
id: rectangle
anchors.fill: parent
onClicked: listView.currentIndex = index
}
clip: true
radius: 3
color: scope.ListView.isCurrentItem ? "#79f" :
index % 2 ? "#ddd" : "#eee"
Row {
anchors.verticalCenter: scriptName.verticalCenter
anchors.right: parent.right
anchors.rightMargin: 4
spacing: 4
HifiControls.FontAwesome {
text: "\uf021"; size: scriptName.height;
MouseArea {
anchors { fill: parent; margins: -2; }
onClicked: reloadScript(model.url)
}
Text {
id: scriptName
anchors { left: parent.left; leftMargin: 4; top: parent.top; topMargin:6 }
text: name
}
HifiControls.FontAwesome {
size: scriptName.height; text: "\uf00d"
MouseArea {
anchors { fill: parent; margins: -2; }
onClicked: stopScript(model.url)
Text {
id: scriptUrl
anchors { left: scriptName.left; right: parent.right; rightMargin: 4; top: scriptName.bottom; topMargin: 6 }
text: url
elide: Text.ElideMiddle
}
MouseArea {
anchors.fill: parent
onClicked: { listView.currentIndex = index; scope.forceActiveFocus(); }
}
Row {
anchors.verticalCenter: scriptName.verticalCenter
anchors.right: parent.right
anchors.rightMargin: 4
spacing: 4
HifiControls.FontAwesome {
text: "\uf021"; size: scriptName.height;
MouseArea {
anchors { fill: parent; margins: -2; }
onClicked: reloadScript(model.url)
}
}
HifiControls.FontAwesome {
size: scriptName.height; text: "\uf00d"
MouseArea {
anchors { fill: parent; margins: -2; }
onClicked: stopScript(model.url)
}
}
}
}
@ -220,6 +224,7 @@ Window {
anchors.bottom: treeView.top
anchors.bottomMargin: 8
placeholderText: "filter"
focus: true
onTextChanged: scriptsModel.filterRegExp = new RegExp("^.*" + text + ".*$", "i")
Component.onCompleted: scriptsModel.filterRegExp = new RegExp("^.*$", "i")
}
@ -232,7 +237,6 @@ Window {
anchors.left: parent.left
anchors.right: parent.right
headerVisible: false
focus: true
// FIXME doesn't work?
onDoubleClicked: isExpanded(index) ? collapse(index) : expand(index)
// FIXME not triggered by double click?
@ -258,14 +262,14 @@ Window {
console.log("Desktop size " + Qt.size(desktop.width, desktop.height));
}
TableViewColumn {
TableViewColumn {
title: "Name";
role: "display";
// delegate: Text {
// text: styleData.value
// renderType: Text.QtRendering
// elite: styleData.elideMode
// }
// delegate: Text {
// text: styleData.value
// renderType: Text.QtRendering
// elite: styleData.elideMode
// }
}
}


@ -57,6 +57,8 @@
#include <gl/Config.h>
#include <gl/QOpenGLContextWrapper.h>
#include <shared/JSONHelpers.h>
#include <ResourceScriptingInterface.h>
#include <AccountManager.h>
#include <AddressManager.h>


@ -240,6 +240,8 @@ public:
glm::quat getCustomListenOrientation() { return _customListenOrientation; }
void setCustomListenOrientation(glm::quat customListenOrientation) { _customListenOrientation = customListenOrientation; }
virtual void rebuildCollisionShape() override;
public slots:
void increaseSize();
void decreaseSize();
@ -256,8 +258,6 @@ public slots:
Q_INVOKABLE void updateMotionBehaviorFromMenu();
virtual void rebuildCollisionShape() override;
Q_INVOKABLE QUrl getAnimGraphUrl() const { return _animGraphUrl; }
void setEnableDebugDrawDefaultPose(bool isEnabled);


@ -105,7 +105,6 @@ void Circle3DOverlay::render(RenderArgs* args) {
auto transform = _transform;
transform.postScale(glm::vec3(getDimensions(), 1.0f));
batch.setModelTransform(transform);
DependencyManager::get<GeometryCache>()->bindSimpleProgram(batch, false, false);
// for our overlay, is solid means we draw a ring between the inner and outer radius of the circle, otherwise
// we just draw a line...
@ -278,6 +277,14 @@ void Circle3DOverlay::render(RenderArgs* args) {
}
}
const render::ShapeKey Circle3DOverlay::getShapeKey() {
auto builder = render::ShapeKey::Builder().withoutCullFace();
if (getAlpha() != 1.0f) {
builder.withTranslucent();
}
return builder.build();
}
void Circle3DOverlay::setProperties(const QScriptValue &properties) {
Planar3DOverlay::setProperties(properties);


@ -25,6 +25,7 @@ public:
Circle3DOverlay(const Circle3DOverlay* circle3DOverlay);
virtual void render(RenderArgs* args);
virtual const render::ShapeKey getShapeKey() override;
virtual void setProperties(const QScriptValue& properties);
virtual QScriptValue getProperty(const QString& property);


@ -46,22 +46,17 @@ void Cube3DOverlay::render(RenderArgs* args) {
Transform transform;
transform.setTranslation(position);
transform.setRotation(rotation);
if (_isSolid) {
// if (_borderSize > 0) {
// // Draw a cube at a larger size behind the main cube, creating
// // a border effect.
// // Disable writing to the depth mask so that the "border" cube will not
// // occlude the main cube. This means the border could be covered by
// // overlays that are further back and drawn later, but this is good
// // enough for the use-case.
// transform.setScale(dimensions * _borderSize);
// batch->setModelTransform(transform);
// DependencyManager::get<GeometryCache>()->renderSolidCube(*batch, 1.0f, glm::vec4(1.0f, 1.0f, 1.0f, alpha));
// }
auto geometryCache = DependencyManager::get<GeometryCache>();
auto pipeline = args->_pipeline;
if (!pipeline) {
pipeline = geometryCache->getShapePipeline();
}
if (_isSolid) {
transform.setScale(dimensions);
batch->setModelTransform(transform);
DependencyManager::get<GeometryCache>()->renderSolidCubeInstance(*batch, cubeColor);
geometryCache->renderSolidCubeInstance(*batch, cubeColor, pipeline);
} else {
if (getIsDashedLine()) {
@ -79,8 +74,6 @@ void Cube3DOverlay::render(RenderArgs* args) {
glm::vec3 topLeftFar(-halfDimensions.x, halfDimensions.y, halfDimensions.z);
glm::vec3 topRightFar(halfDimensions.x, halfDimensions.y, halfDimensions.z);
auto geometryCache = DependencyManager::get<GeometryCache>();
geometryCache->renderDashedLine(*batch, bottomLeftNear, bottomRightNear, cubeColor);
geometryCache->renderDashedLine(*batch, bottomRightNear, bottomRightFar, cubeColor);
geometryCache->renderDashedLine(*batch, bottomRightFar, bottomLeftFar, cubeColor);
@ -99,12 +92,20 @@ void Cube3DOverlay::render(RenderArgs* args) {
} else {
transform.setScale(dimensions);
batch->setModelTransform(transform);
DependencyManager::get<GeometryCache>()->renderWireCubeInstance(*batch, cubeColor);
geometryCache->renderWireCubeInstance(*batch, cubeColor, pipeline);
}
}
}
}
const render::ShapeKey Cube3DOverlay::getShapeKey() {
auto builder = render::ShapeKey::Builder();
if (getAlpha() != 1.0f) {
builder.withTranslucent();
}
return builder.build();
}
Cube3DOverlay* Cube3DOverlay::createClone() const {
return new Cube3DOverlay(this);
}


@ -24,6 +24,7 @@ public:
Cube3DOverlay(const Cube3DOverlay* cube3DOverlay);
virtual void render(RenderArgs* args);
virtual const render::ShapeKey getShapeKey() override;
virtual Cube3DOverlay* createClone() const;


@ -93,6 +93,14 @@ void Grid3DOverlay::render(RenderArgs* args) {
}
}
const render::ShapeKey Grid3DOverlay::getShapeKey() {
auto builder = render::ShapeKey::Builder();
if (getAlpha() != 1.0f) {
builder.withTranslucent();
}
return builder.build();
}
void Grid3DOverlay::setProperties(const QScriptValue& properties) {
Planar3DOverlay::setProperties(properties);


@ -25,6 +25,7 @@ public:
Grid3DOverlay(const Grid3DOverlay* grid3DOverlay);
virtual void render(RenderArgs* args);
virtual const render::ShapeKey getShapeKey() override;
virtual void setProperties(const QScriptValue& properties);
virtual QScriptValue getProperty(const QString& property);


@ -95,7 +95,6 @@ void Image3DOverlay::render(RenderArgs* args) {
batch->setModelTransform(transform);
batch->setResourceTexture(0, _texture->getGPUTexture());
DependencyManager::get<GeometryCache>()->bindSimpleProgram(*batch, true, false, _emissive, true);
DependencyManager::get<GeometryCache>()->renderQuad(
*batch, topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight,
glm::vec4(color.red / MAX_COLOR, color.green / MAX_COLOR, color.blue / MAX_COLOR, alpha)
@ -104,6 +103,17 @@ void Image3DOverlay::render(RenderArgs* args) {
batch->setResourceTexture(0, args->_whiteTexture); // restore default white color after me
}
const render::ShapeKey Image3DOverlay::getShapeKey() {
auto builder = render::ShapeKey::Builder().withoutCullFace().withDepthBias();
if (_emissive) {
builder.withEmissive();
}
if (getAlpha() != 1.0f) {
builder.withTranslucent();
}
return builder.build();
}
void Image3DOverlay::setProperties(const QScriptValue &properties) {
Billboard3DOverlay::setProperties(properties);


@ -31,6 +31,8 @@ public:
virtual void update(float deltatime);
virtual const render::ShapeKey getShapeKey() override;
// setters
void setURL(const QString& url);
void setClipFromSource(const QRect& bounds) { _fromImage = bounds; }


@ -53,7 +53,6 @@ void Line3DOverlay::render(RenderArgs* args) {
auto batch = args->_batch;
if (batch) {
batch->setModelTransform(_transform);
DependencyManager::get<GeometryCache>()->bindSimpleProgram(*batch);
if (getIsDashedLine()) {
// TODO: add support for color to renderDashedLine()
@ -64,6 +63,14 @@ void Line3DOverlay::render(RenderArgs* args) {
}
}
const render::ShapeKey Line3DOverlay::getShapeKey() {
auto builder = render::ShapeKey::Builder();
if (getAlpha() != 1.0f) {
builder.withTranslucent();
}
return builder.build();
}
void Line3DOverlay::setProperties(const QScriptValue& properties) {
Base3DOverlay::setProperties(properties);


@ -24,6 +24,7 @@ public:
Line3DOverlay(const Line3DOverlay* line3DOverlay);
~Line3DOverlay();
virtual void render(RenderArgs* args);
virtual const render::ShapeKey getShapeKey() override;
virtual AABox getBounds() const;
// getters


@ -44,6 +44,8 @@ public:
virtual bool addToScene(Overlay::Pointer overlay, std::shared_ptr<render::Scene> scene, render::PendingChanges& pendingChanges);
virtual void removeFromScene(Overlay::Pointer overlay, std::shared_ptr<render::Scene> scene, render::PendingChanges& pendingChanges);
virtual const render::ShapeKey getShapeKey() { return render::ShapeKey::Builder::ownPipeline(); }
// getters
virtual QString getType() const = 0;
virtual bool is3D() const = 0;
@ -119,6 +121,7 @@ namespace render {
template <> const Item::Bound payloadGetBound(const Overlay::Pointer& overlay);
template <> int payloadGetLayer(const Overlay::Pointer& overlay);
template <> void payloadRender(const Overlay::Pointer& overlay, RenderArgs* args);
template <> const ShapeKey shapeGetShapeKey(const Overlay::Pointer& overlay);
}


@ -35,15 +35,18 @@
namespace render {
template <> const ItemKey payloadGetKey(const Overlay::Pointer& overlay) {
auto builder = ItemKey::Builder().withTypeShape();
if (overlay->is3D() && !std::dynamic_pointer_cast<Base3DOverlay>(overlay)->getDrawOnHUD()) {
if (std::dynamic_pointer_cast<Base3DOverlay>(overlay)->getDrawInFront()) {
return ItemKey::Builder().withTypeShape().withLayered().build();
} else {
return ItemKey::Builder::opaqueShape();
builder.withLayered();
}
if (overlay->getAlpha() != 1.0f) {
builder.withTransparent();
}
} else {
return ItemKey::Builder().withTypeShape().withViewSpace().build();
builder.withViewSpace();
}
return builder.build();
}
template <> const Item::Bound payloadGetBound(const Overlay::Pointer& overlay) {
return overlay->getBounds();
@ -80,4 +83,7 @@ namespace render {
}
}
}
template <> const ShapeKey shapeGetShapeKey(const Overlay::Pointer& overlay) {
return overlay->getShapeKey();
}
}


@ -88,6 +88,14 @@ void Rectangle3DOverlay::render(RenderArgs* args) {
}
}
const render::ShapeKey Rectangle3DOverlay::getShapeKey() {
auto builder = render::ShapeKey::Builder();
if (getAlpha() != 1.0f) {
builder.withTranslucent();
}
return builder.build();
}
void Rectangle3DOverlay::setProperties(const QScriptValue &properties) {
Planar3DOverlay::setProperties(properties);
}


@ -24,6 +24,7 @@ public:
Rectangle3DOverlay(const Rectangle3DOverlay* rectangle3DOverlay);
~Rectangle3DOverlay();
virtual void render(RenderArgs* args);
virtual const render::ShapeKey getShapeKey() override;
virtual void setProperties(const QScriptValue& properties);
virtual Rectangle3DOverlay* createClone() const;


@ -42,14 +42,29 @@ void Sphere3DOverlay::render(RenderArgs* args) {
Transform transform = _transform;
transform.postScale(getDimensions() * SPHERE_OVERLAY_SCALE);
batch->setModelTransform(transform);
auto geometryCache = DependencyManager::get<GeometryCache>();
auto pipeline = args->_pipeline;
if (!pipeline) {
pipeline = geometryCache->getShapePipeline();
}
if (_isSolid) {
DependencyManager::get<GeometryCache>()->renderSolidSphereInstance(*batch, sphereColor);
geometryCache->renderSolidSphereInstance(*batch, sphereColor, pipeline);
} else {
DependencyManager::get<GeometryCache>()->renderWireSphereInstance(*batch, sphereColor);
geometryCache->renderWireSphereInstance(*batch, sphereColor, pipeline);
}
}
}
const render::ShapeKey Sphere3DOverlay::getShapeKey() {
auto builder = render::ShapeKey::Builder();
if (getAlpha() != 1.0f) {
builder.withTranslucent();
}
return builder.build();
}
Sphere3DOverlay* Sphere3DOverlay::createClone() const {
return new Sphere3DOverlay(this);
}


@ -24,6 +24,7 @@ public:
Sphere3DOverlay(const Sphere3DOverlay* Sphere3DOverlay);
virtual void render(RenderArgs* args);
virtual const render::ShapeKey getShapeKey() override;
virtual Sphere3DOverlay* createClone() const;
};


@ -8,8 +8,10 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "Text3DOverlay.h"
#include <TextureCache.h>
#include <GeometryCache.h>
#include <RegisteredMetaTypes.h>
#include <RenderDeferredTask.h>
@ -34,6 +36,7 @@ Text3DOverlay::Text3DOverlay() :
_bottomMargin(DEFAULT_MARGIN)
{
_textRenderer = TextRenderer3D::getInstance(SANS_FONT_FAMILY, FIXED_FONT_POINT_SIZE);
_alpha = _backgroundAlpha;
}
Text3DOverlay::Text3DOverlay(const Text3DOverlay* text3DOverlay) :
@ -47,7 +50,8 @@ Text3DOverlay::Text3DOverlay(const Text3DOverlay* text3DOverlay) :
_rightMargin(text3DOverlay->_rightMargin),
_bottomMargin(text3DOverlay->_bottomMargin)
{
_textRenderer = TextRenderer3D::getInstance(SANS_FONT_FAMILY, FIXED_FONT_POINT_SIZE);
_textRenderer = TextRenderer3D::getInstance(SANS_FONT_FAMILY, FIXED_FONT_POINT_SIZE);
_alpha = _backgroundAlpha;
}
Text3DOverlay::~Text3DOverlay() {
@ -100,7 +104,6 @@ void Text3DOverlay::render(RenderArgs* args) {
glm::vec3 topLeft(-halfDimensions.x, -halfDimensions.y, SLIGHTLY_BEHIND);
glm::vec3 bottomRight(halfDimensions.x, halfDimensions.y, SLIGHTLY_BEHIND);
DependencyManager::get<GeometryCache>()->bindSimpleProgram(batch, false, true, false, true);
DependencyManager::get<GeometryCache>()->renderQuad(batch, topLeft, bottomRight, quadColor);
// Same font properties as textSize()
@ -120,7 +123,15 @@ void Text3DOverlay::render(RenderArgs* args) {
glm::vec4 textColor = { _color.red / MAX_COLOR, _color.green / MAX_COLOR,
_color.blue / MAX_COLOR, getAlpha() };
_textRenderer->draw(batch, 0, 0, _text, textColor);
_textRenderer->draw(batch, 0, 0, _text, textColor, glm::vec2(-1.0f), getDrawInFront());
}
const render::ShapeKey Text3DOverlay::getShapeKey() {
auto builder = render::ShapeKey::Builder();
if (getAlpha() != 1.0f) {
builder.withTranslucent();
}
return builder.build();
}
void Text3DOverlay::setProperties(const QScriptValue& properties) {
@ -145,6 +156,7 @@ void Text3DOverlay::setProperties(const QScriptValue& properties) {
if (properties.property("backgroundAlpha").isValid()) {
_backgroundAlpha = properties.property("backgroundAlpha").toVariant().toFloat();
_alpha = _backgroundAlpha;
}
if (properties.property("lineHeight").isValid()) {


@ -31,6 +31,8 @@ public:
virtual void update(float deltatime);
virtual const render::ShapeKey getShapeKey() override;
// getters
const QString& getText() const { return _text; }
float getLineHeight() const { return _lineHeight; }
@ -72,5 +74,4 @@ private:
float _bottomMargin;
};
#endif // hifi_Text3DOverlay_h


@ -34,7 +34,7 @@ public:
protected:
// Centered local bounding box
AABox _localBoundingBox;
AABox _localBoundingBox{ vec3(0.0f), 1.0f };
};


@ -101,11 +101,18 @@ void Web3DOverlay::render(RenderArgs* args) {
}
batch.setModelTransform(transform);
DependencyManager::get<GeometryCache>()->bindSimpleProgram(batch, true, false, false, true);
DependencyManager::get<GeometryCache>()->renderQuad(batch, halfSize * -1.0f, halfSize, vec2(0), vec2(1), color);
batch.setResourceTexture(0, args->_whiteTexture); // restore default white color after me
}
const render::ShapeKey Web3DOverlay::getShapeKey() {
auto builder = render::ShapeKey::Builder().withoutCullFace().withDepthBias();
if (getAlpha() != 1.0f) {
builder.withTranslucent();
}
return builder.build();
}
void Web3DOverlay::setProperties(const QScriptValue &properties) {
Billboard3DOverlay::setProperties(properties);


@ -25,6 +25,7 @@ public:
virtual ~Web3DOverlay();
virtual void render(RenderArgs* args);
virtual const render::ShapeKey getShapeKey() override;
virtual void update(float deltatime);


@ -125,7 +125,7 @@ void AnimStateMachine::switchState(const AnimVariantMap& animVars, State::Pointe
assert(false);
}
#if WANT_DEBUG
#ifdef WANT_DEBUG
qCDebug(animation) << "AnimStateMachine::switchState:" << _currentState->getID() << "->" << desiredState->getID() << "duration =" << duration << "targetFrame =" << desiredState->_interpTarget << "interpType = " << (int)_interpType;
#endif


@ -641,7 +641,8 @@ void Rig::computeMotionAnimationState(float deltaTime, const glm::vec3& worldPos
_desiredStateAge += deltaTime;
if (_state == RigRole::Move) {
if (glm::length(localVel) > MOVE_ENTER_SPEED_THRESHOLD) {
glm::vec3 horizontalVel = localVel - glm::vec3(0.0f, localVel.y, 0.0f);
if (glm::length(horizontalVel) > MOVE_ENTER_SPEED_THRESHOLD) {
if (fabsf(forwardSpeed) > 0.5f * fabsf(lateralSpeed)) {
if (forwardSpeed > 0.0f) {
// forward
@ -676,18 +677,19 @@ void Rig::computeMotionAnimationState(float deltaTime, const glm::vec3& worldPos
_animVars.set("isNotMoving", false);
}
}
_animVars.set("isTurningLeft", false);
_animVars.set("isTurningRight", false);
_animVars.set("isNotTurning", true);
_animVars.set("isFlying", false);
_animVars.set("isNotFlying", true);
_animVars.set("isTakeoffStand", false);
_animVars.set("isTakeoffRun", false);
_animVars.set("isNotTakeoff", true);
_animVars.set("isInAirStand", false);
_animVars.set("isInAirRun", false);
_animVars.set("isNotInAir", true);
}
_animVars.set("isTurningLeft", false);
_animVars.set("isTurningRight", false);
_animVars.set("isNotTurning", true);
_animVars.set("isFlying", false);
_animVars.set("isNotFlying", true);
_animVars.set("isTakeoffStand", false);
_animVars.set("isTakeoffRun", false);
_animVars.set("isNotTakeoff", true);
_animVars.set("isInAirStand", false);
_animVars.set("isInAirRun", false);
_animVars.set("isNotInAir", true);
} else if (_state == RigRole::Turn) {
if (turningSpeed > 0.0f) {
// turning right
@ -807,7 +809,7 @@ void Rig::computeMotionAnimationState(float deltaTime, const glm::vec3& worldPos
// compute blend based on velocity
const float JUMP_SPEED = 3.5f;
float alpha = glm::clamp(-worldVelocity.y / JUMP_SPEED, -1.0f, 1.0f) + 1.0f;
float alpha = glm::clamp(-_lastWorldVelocity.y / JUMP_SPEED, -1.0f, 1.0f) + 1.0f;
_animVars.set("inAirAlpha", alpha);
}
@ -825,6 +827,7 @@ void Rig::computeMotionAnimationState(float deltaTime, const glm::vec3& worldPos
_lastFront = front;
_lastPosition = worldPosition;
_lastWorldVelocity = worldVelocity;
}
// Allow script to add/remove handlers and report results, from within their thread.


@ -267,6 +267,7 @@ public:
glm::vec3 _lastFront;
glm::vec3 _lastPosition;
glm::vec3 _lastVelocity;
glm::vec3 _lastWorldVelocity;
glm::vec3 _eyesInRootFrame { Vectors::ZERO };
QUrl _animGraphURL;


@ -0,0 +1,680 @@
//
// AudioHRTF.cpp
// libraries/audio/src
//
// Created by Ken Cooke on 1/17/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <math.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>
#include "AudioHRTF.h"
#include "AudioHRTFData.h"
//
// Equal-gain crossfade
//
// Cos(x)^2 window minimizes the modulation sidebands when a pure tone is panned.
// Transients in the time-varying Thiran allpass filter are eliminated by the initial delay.
// Valimaki, Laakso. "Elimination of Transients in Time-Varying Allpass Fractional Delay Filters"
//
static const float crossfadeTable[HRTF_BLOCK] = {
1.0000000000f, 1.0000000000f, 1.0000000000f, 1.0000000000f, 1.0000000000f, 0.9999611462f, 0.9998445910f, 0.9996503524f,
0.9993784606f, 0.9990289579f, 0.9986018986f, 0.9980973490f, 0.9975153877f, 0.9968561049f, 0.9961196033f, 0.9953059972f,
0.9944154131f, 0.9934479894f, 0.9924038765f, 0.9912832366f, 0.9900862439f, 0.9888130845f, 0.9874639561f, 0.9860390685f,
0.9845386431f, 0.9829629131f, 0.9813121235f, 0.9795865307f, 0.9777864029f, 0.9759120199f, 0.9739636731f, 0.9719416652f,
0.9698463104f, 0.9676779344f, 0.9654368743f, 0.9631234783f, 0.9607381059f, 0.9582811279f, 0.9557529262f, 0.9531538935f,
0.9504844340f, 0.9477449623f, 0.9449359044f, 0.9420576968f, 0.9391107867f, 0.9360956322f, 0.9330127019f, 0.9298624749f,
0.9266454408f, 0.9233620996f, 0.9200129616f, 0.9165985472f, 0.9131193872f, 0.9095760221f, 0.9059690029f, 0.9022988899f,
0.8985662536f, 0.8947716742f, 0.8909157412f, 0.8869990541f, 0.8830222216f, 0.8789858616f, 0.8748906015f, 0.8707370778f,
0.8665259359f, 0.8622578304f, 0.8579334246f, 0.8535533906f, 0.8491184090f, 0.8446291692f, 0.8400863689f, 0.8354907140f,
0.8308429188f, 0.8261437056f, 0.8213938048f, 0.8165939546f, 0.8117449009f, 0.8068473974f, 0.8019022052f, 0.7969100928f,
0.7918718361f, 0.7867882182f, 0.7816600290f, 0.7764880657f, 0.7712731319f, 0.7660160383f, 0.7607176017f, 0.7553786457f,
0.7500000000f, 0.7445825006f, 0.7391269893f, 0.7336343141f, 0.7281053287f, 0.7225408922f, 0.7169418696f, 0.7113091309f,
0.7056435516f, 0.6999460122f, 0.6942173981f, 0.6884585998f, 0.6826705122f, 0.6768540348f, 0.6710100717f, 0.6651395310f,
0.6592433251f, 0.6533223705f, 0.6473775872f, 0.6414098993f, 0.6354202341f, 0.6294095226f, 0.6233786988f, 0.6173287002f,
0.6112604670f, 0.6051749422f, 0.5990730716f, 0.5929558036f, 0.5868240888f, 0.5806788803f, 0.5745211331f, 0.5683518042f,
0.5621718523f, 0.5559822381f, 0.5497839233f, 0.5435778714f, 0.5373650468f, 0.5311464151f, 0.5249229428f, 0.5186955971f,
0.5124653459f, 0.5062331573f, 0.5000000000f, 0.4937668427f, 0.4875346541f, 0.4813044029f, 0.4750770572f, 0.4688535849f,
0.4626349532f, 0.4564221286f, 0.4502160767f, 0.4440177619f, 0.4378281477f, 0.4316481958f, 0.4254788669f, 0.4193211197f,
0.4131759112f, 0.4070441964f, 0.4009269284f, 0.3948250578f, 0.3887395330f, 0.3826712998f, 0.3766213012f, 0.3705904774f,
0.3645797659f, 0.3585901007f, 0.3526224128f, 0.3466776295f, 0.3407566749f, 0.3348604690f, 0.3289899283f, 0.3231459652f,
0.3173294878f, 0.3115414002f, 0.3057826019f, 0.3000539878f, 0.2943564484f, 0.2886908691f, 0.2830581304f, 0.2774591078f,
0.2718946713f, 0.2663656859f, 0.2608730107f, 0.2554174994f, 0.2500000000f, 0.2446213543f, 0.2392823983f, 0.2339839617f,
0.2287268681f, 0.2235119343f, 0.2183399710f, 0.2132117818f, 0.2081281639f, 0.2030899072f, 0.1980977948f, 0.1931526026f,
0.1882550991f, 0.1834060454f, 0.1786061952f, 0.1738562944f, 0.1691570812f, 0.1645092860f, 0.1599136311f, 0.1553708308f,
0.1508815910f, 0.1464466094f, 0.1420665754f, 0.1377421696f, 0.1334740641f, 0.1292629222f, 0.1251093985f, 0.1210141384f,
0.1169777784f, 0.1130009459f, 0.1090842588f, 0.1052283258f, 0.1014337464f, 0.0977011101f, 0.0940309971f, 0.0904239779f,
0.0868806128f, 0.0834014528f, 0.0799870384f, 0.0766379004f, 0.0733545592f, 0.0701375251f, 0.0669872981f, 0.0639043678f,
0.0608892133f, 0.0579423032f, 0.0550640956f, 0.0522550377f, 0.0495155660f, 0.0468461065f, 0.0442470738f, 0.0417188721f,
0.0392618941f, 0.0368765217f, 0.0345631257f, 0.0323220656f, 0.0301536896f, 0.0280583348f, 0.0260363269f, 0.0240879801f,
0.0222135971f, 0.0204134693f, 0.0186878765f, 0.0170370869f, 0.0154613569f, 0.0139609315f, 0.0125360439f, 0.0111869155f,
0.0099137561f, 0.0087167634f, 0.0075961235f, 0.0065520106f, 0.0055845869f, 0.0046940028f, 0.0038803967f, 0.0031438951f,
0.0024846123f, 0.0019026510f, 0.0013981014f, 0.0009710421f, 0.0006215394f, 0.0003496476f, 0.0001554090f, 0.0000388538f,
};
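//
// The values above are consistent with a cos^2 fade preceded by a few flat (1.0) samples - the initial
// delay mentioned above. A minimal sketch of a generator reproducing this shape; the 4-sample lead-in
// and the helper itself are inferred from the table values, not taken from the original source:
//
//   static void buildCrossfadeTable(float table[HRTF_BLOCK]) {
//       const int LEAD = 4;                               // flat region at the start of the block
//       for (int i = 0; i < HRTF_BLOCK; i++) {
//           float x = (i <= LEAD) ? 0.0f : (float)(i - LEAD) / (float)(HRTF_BLOCK - LEAD);
//           float c = cosf(1.5707963f * x);               // quarter cycle of a cosine
//           table[i] = c * c;                             // cos^2 window: 1.0 at the start, ~0.0 at the end
//       }
//   }
//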
//
// on x86 architecture, assume that SSE2 is present
//
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || defined(__x86_64__)
#include <emmintrin.h>
// 1 channel input, 4 channel output
static void FIR_1x4_SSE(float* src, float* dst0, float* dst1, float* dst2, float* dst3, float coef[4][HRTF_TAPS], int numFrames) {
float* coef0 = coef[0] + HRTF_TAPS - 1; // process backwards
float* coef1 = coef[1] + HRTF_TAPS - 1;
float* coef2 = coef[2] + HRTF_TAPS - 1;
float* coef3 = coef[3] + HRTF_TAPS - 1;
assert(numFrames % 4 == 0);
for (int i = 0; i < numFrames; i += 4) {
__m128 acc0 = _mm_setzero_ps();
__m128 acc1 = _mm_setzero_ps();
__m128 acc2 = _mm_setzero_ps();
__m128 acc3 = _mm_setzero_ps();
float* ps = &src[i - HRTF_TAPS + 1]; // process forwards
assert(HRTF_TAPS % 4 == 0);
for (int k = 0; k < HRTF_TAPS; k += 4) {
acc0 = _mm_add_ps(acc0, _mm_mul_ps(_mm_load1_ps(&coef0[-k-0]), _mm_loadu_ps(&ps[k+0])));
acc1 = _mm_add_ps(acc1, _mm_mul_ps(_mm_load1_ps(&coef1[-k-0]), _mm_loadu_ps(&ps[k+0])));
acc2 = _mm_add_ps(acc2, _mm_mul_ps(_mm_load1_ps(&coef2[-k-0]), _mm_loadu_ps(&ps[k+0])));
acc3 = _mm_add_ps(acc3, _mm_mul_ps(_mm_load1_ps(&coef3[-k-0]), _mm_loadu_ps(&ps[k+0])));
acc0 = _mm_add_ps(acc0, _mm_mul_ps(_mm_load1_ps(&coef0[-k-1]), _mm_loadu_ps(&ps[k+1])));
acc1 = _mm_add_ps(acc1, _mm_mul_ps(_mm_load1_ps(&coef1[-k-1]), _mm_loadu_ps(&ps[k+1])));
acc2 = _mm_add_ps(acc2, _mm_mul_ps(_mm_load1_ps(&coef2[-k-1]), _mm_loadu_ps(&ps[k+1])));
acc3 = _mm_add_ps(acc3, _mm_mul_ps(_mm_load1_ps(&coef3[-k-1]), _mm_loadu_ps(&ps[k+1])));
acc0 = _mm_add_ps(acc0, _mm_mul_ps(_mm_load1_ps(&coef0[-k-2]), _mm_loadu_ps(&ps[k+2])));
acc1 = _mm_add_ps(acc1, _mm_mul_ps(_mm_load1_ps(&coef1[-k-2]), _mm_loadu_ps(&ps[k+2])));
acc2 = _mm_add_ps(acc2, _mm_mul_ps(_mm_load1_ps(&coef2[-k-2]), _mm_loadu_ps(&ps[k+2])));
acc3 = _mm_add_ps(acc3, _mm_mul_ps(_mm_load1_ps(&coef3[-k-2]), _mm_loadu_ps(&ps[k+2])));
acc0 = _mm_add_ps(acc0, _mm_mul_ps(_mm_load1_ps(&coef0[-k-3]), _mm_loadu_ps(&ps[k+3])));
acc1 = _mm_add_ps(acc1, _mm_mul_ps(_mm_load1_ps(&coef1[-k-3]), _mm_loadu_ps(&ps[k+3])));
acc2 = _mm_add_ps(acc2, _mm_mul_ps(_mm_load1_ps(&coef2[-k-3]), _mm_loadu_ps(&ps[k+3])));
acc3 = _mm_add_ps(acc3, _mm_mul_ps(_mm_load1_ps(&coef3[-k-3]), _mm_loadu_ps(&ps[k+3])));
}
_mm_storeu_ps(&dst0[i], acc0);
_mm_storeu_ps(&dst1[i], acc1);
_mm_storeu_ps(&dst2[i], acc2);
_mm_storeu_ps(&dst3[i], acc3);
}
}
//
// Detect AVX/AVX2 support
//
#if defined(_MSC_VER)
#include <intrin.h>
static bool cpuSupportsAVX() {
int info[4];
int mask = (1 << 27) | (1 << 28); // OSXSAVE and AVX
__cpuidex(info, 0x1, 0);
bool result = false;
if ((info[2] & mask) == mask) {
if ((_xgetbv(_XCR_XFEATURE_ENABLED_MASK) & 0x6) == 0x6) {
result = true;
}
}
return result;
}
#elif defined(__GNUC__)
static bool cpuSupportsAVX() {
return __builtin_cpu_supports("avx");
}
#else
static bool cpuSupportsAVX() {
return false;
}
#endif
//
// Runtime CPU dispatch
//
typedef void FIR_1x4_t(float* src, float* dst0, float* dst1, float* dst2, float* dst3, float coef[4][HRTF_TAPS], int numFrames);
FIR_1x4_t FIR_1x4_AVX; // separate compilation with VEX-encoding enabled
static void FIR_1x4(float* src, float* dst0, float* dst1, float* dst2, float* dst3, float coef[4][HRTF_TAPS], int numFrames) {
static FIR_1x4_t* f = cpuSupportsAVX() ? FIR_1x4_AVX : FIR_1x4_SSE; // init on first call
(*f)(src, dst0, dst1, dst2, dst3, coef, numFrames); // dispatch
}
// 4 channel planar to interleaved
static void interleave_4x4(float* src0, float* src1, float* src2, float* src3, float* dst, int numFrames) {
assert(numFrames % 4 == 0);
for (int i = 0; i < numFrames; i += 4) {
__m128 x0 = _mm_loadu_ps(&src0[i]);
__m128 x1 = _mm_loadu_ps(&src1[i]);
__m128 x2 = _mm_loadu_ps(&src2[i]);
__m128 x3 = _mm_loadu_ps(&src3[i]);
// interleave (4x4 matrix transpose)
__m128 t0 = _mm_unpacklo_ps(x0, x1);
__m128 t2 = _mm_unpacklo_ps(x2, x3);
__m128 t1 = _mm_unpackhi_ps(x0, x1);
__m128 t3 = _mm_unpackhi_ps(x2, x3);
x0 = _mm_movelh_ps(t0, t2);
x1 = _mm_movehl_ps(t2, t0);
x2 = _mm_movelh_ps(t1, t3);
x3 = _mm_movehl_ps(t3, t1);
_mm_storeu_ps(&dst[4*i+0], x0);
_mm_storeu_ps(&dst[4*i+4], x1);
_mm_storeu_ps(&dst[4*i+8], x2);
_mm_storeu_ps(&dst[4*i+12], x3);
}
}
// 4 channels (interleaved)
static void biquad_4x4(float* src, float* dst, float coef[5][4], float state[2][4], int numFrames) {
// enable flush-to-zero mode to prevent denormals
unsigned int ftz = _MM_GET_FLUSH_ZERO_MODE();
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
__m128 w1 = _mm_loadu_ps(state[0]);
__m128 w2 = _mm_loadu_ps(state[1]);
__m128 b0 = _mm_loadu_ps(coef[0]);
__m128 b1 = _mm_loadu_ps(coef[1]);
__m128 b2 = _mm_loadu_ps(coef[2]);
__m128 a1 = _mm_loadu_ps(coef[3]);
__m128 a2 = _mm_loadu_ps(coef[4]);
for (int i = 0; i < numFrames; i++) {
// transposed Direct Form II
__m128 x0 = _mm_loadu_ps(&src[4*i]);
__m128 y0;
y0 = _mm_add_ps(w1, _mm_mul_ps(x0, b0));
w1 = _mm_add_ps(w2, _mm_mul_ps(x0, b1));
w2 = _mm_mul_ps(x0, b2);
w1 = _mm_sub_ps(w1, _mm_mul_ps(y0, a1));
w2 = _mm_sub_ps(w2, _mm_mul_ps(y0, a2));
_mm_storeu_ps(&dst[4*i], y0);
}
// save state
_mm_storeu_ps(state[0], w1);
_mm_storeu_ps(state[1], w2);
_MM_SET_FLUSH_ZERO_MODE(ftz);
}
// crossfade 4 inputs into 2 outputs with accumulation (interleaved)
static void crossfade_4x2(float* src, float* dst, const float* win, int numFrames) {
assert(numFrames % 4 == 0);
for (int i = 0; i < numFrames; i += 4) {
__m128 f0 = _mm_loadu_ps(&win[i]);
__m128 x0 = _mm_loadu_ps(&src[4*i+0]);
__m128 x1 = _mm_loadu_ps(&src[4*i+4]);
__m128 x2 = _mm_loadu_ps(&src[4*i+8]);
__m128 x3 = _mm_loadu_ps(&src[4*i+12]);
__m128 y0 = _mm_loadu_ps(&dst[2*i+0]);
__m128 y1 = _mm_loadu_ps(&dst[2*i+4]);
// deinterleave (4x4 matrix transpose)
__m128 t0 = _mm_unpacklo_ps(x0, x1);
__m128 t2 = _mm_unpacklo_ps(x2, x3);
__m128 t1 = _mm_unpackhi_ps(x0, x1);
__m128 t3 = _mm_unpackhi_ps(x2, x3);
x0 = _mm_movelh_ps(t0, t2);
x1 = _mm_movehl_ps(t2, t0);
x2 = _mm_movelh_ps(t1, t3);
x3 = _mm_movehl_ps(t3, t1);
// crossfade
x0 = _mm_sub_ps(x0, x2);
x1 = _mm_sub_ps(x1, x3);
x2 = _mm_add_ps(x2, _mm_mul_ps(f0, x0));
x3 = _mm_add_ps(x3, _mm_mul_ps(f0, x1));
// interleave
x0 = _mm_unpacklo_ps(x2, x3);
x1 = _mm_unpackhi_ps(x2, x3);
// accumulate
y0 = _mm_add_ps(y0, x0);
y1 = _mm_add_ps(y1, x1);
_mm_storeu_ps(&dst[2*i+0], y0);
_mm_storeu_ps(&dst[2*i+4], y1);
}
}
// linear interpolation with gain
static void interpolate(float* dst, const float* src0, const float* src1, float frac, float gain) {
__m128 f0 = _mm_set1_ps(HRTF_GAIN * gain * (1.0f - frac));
__m128 f1 = _mm_set1_ps(HRTF_GAIN * gain * frac);
assert(HRTF_TAPS % 4 == 0);
for (int k = 0; k < HRTF_TAPS; k += 4) {
__m128 x0 = _mm_loadu_ps(&src0[k]);
__m128 x1 = _mm_loadu_ps(&src1[k]);
x0 = _mm_add_ps(_mm_mul_ps(f0, x0), _mm_mul_ps(f1, x1));
_mm_storeu_ps(&dst[k], x0);
}
}
#else // portable reference code
// 1 channel input, 4 channel output
static void FIR_1x4(float* src, float* dst0, float* dst1, float* dst2, float* dst3, float coef[4][HRTF_TAPS], int numFrames) {
float* coef0 = coef[0] + HRTF_TAPS - 1; // process backwards
float* coef1 = coef[1] + HRTF_TAPS - 1;
float* coef2 = coef[2] + HRTF_TAPS - 1;
float* coef3 = coef[3] + HRTF_TAPS - 1;
assert(numFrames % 4 == 0);
for (int i = 0; i < numFrames; i += 4) {
dst0[i+0] = 0.0f;
dst0[i+1] = 0.0f;
dst0[i+2] = 0.0f;
dst0[i+3] = 0.0f;
dst1[i+0] = 0.0f;
dst1[i+1] = 0.0f;
dst1[i+2] = 0.0f;
dst1[i+3] = 0.0f;
dst2[i+0] = 0.0f;
dst2[i+1] = 0.0f;
dst2[i+2] = 0.0f;
dst2[i+3] = 0.0f;
dst3[i+0] = 0.0f;
dst3[i+1] = 0.0f;
dst3[i+2] = 0.0f;
dst3[i+3] = 0.0f;
float* ps = &src[i - HRTF_TAPS + 1]; // process forwards
assert(HRTF_TAPS % 4 == 0);
for (int k = 0; k < HRTF_TAPS; k += 4) {
// channel 0
dst0[i+0] += coef0[-k-0] * ps[k+0] + coef0[-k-1] * ps[k+1] + coef0[-k-2] * ps[k+2] + coef0[-k-3] * ps[k+3];
dst0[i+1] += coef0[-k-0] * ps[k+1] + coef0[-k-1] * ps[k+2] + coef0[-k-2] * ps[k+3] + coef0[-k-3] * ps[k+4];
dst0[i+2] += coef0[-k-0] * ps[k+2] + coef0[-k-1] * ps[k+3] + coef0[-k-2] * ps[k+4] + coef0[-k-3] * ps[k+5];
dst0[i+3] += coef0[-k-0] * ps[k+3] + coef0[-k-1] * ps[k+4] + coef0[-k-2] * ps[k+5] + coef0[-k-3] * ps[k+6];
// channel 1
dst1[i+0] += coef1[-k-0] * ps[k+0] + coef1[-k-1] * ps[k+1] + coef1[-k-2] * ps[k+2] + coef1[-k-3] * ps[k+3];
dst1[i+1] += coef1[-k-0] * ps[k+1] + coef1[-k-1] * ps[k+2] + coef1[-k-2] * ps[k+3] + coef1[-k-3] * ps[k+4];
dst1[i+2] += coef1[-k-0] * ps[k+2] + coef1[-k-1] * ps[k+3] + coef1[-k-2] * ps[k+4] + coef1[-k-3] * ps[k+5];
dst1[i+3] += coef1[-k-0] * ps[k+3] + coef1[-k-1] * ps[k+4] + coef1[-k-2] * ps[k+5] + coef1[-k-3] * ps[k+6];
// channel 2
dst2[i+0] += coef2[-k-0] * ps[k+0] + coef2[-k-1] * ps[k+1] + coef2[-k-2] * ps[k+2] + coef2[-k-3] * ps[k+3];
dst2[i+1] += coef2[-k-0] * ps[k+1] + coef2[-k-1] * ps[k+2] + coef2[-k-2] * ps[k+3] + coef2[-k-3] * ps[k+4];
dst2[i+2] += coef2[-k-0] * ps[k+2] + coef2[-k-1] * ps[k+3] + coef2[-k-2] * ps[k+4] + coef2[-k-3] * ps[k+5];
dst2[i+3] += coef2[-k-0] * ps[k+3] + coef2[-k-1] * ps[k+4] + coef2[-k-2] * ps[k+5] + coef2[-k-3] * ps[k+6];
// channel 3
dst3[i+0] += coef3[-k-0] * ps[k+0] + coef3[-k-1] * ps[k+1] + coef3[-k-2] * ps[k+2] + coef3[-k-3] * ps[k+3];
dst3[i+1] += coef3[-k-0] * ps[k+1] + coef3[-k-1] * ps[k+2] + coef3[-k-2] * ps[k+3] + coef3[-k-3] * ps[k+4];
dst3[i+2] += coef3[-k-0] * ps[k+2] + coef3[-k-1] * ps[k+3] + coef3[-k-2] * ps[k+4] + coef3[-k-3] * ps[k+5];
dst3[i+3] += coef3[-k-0] * ps[k+3] + coef3[-k-1] * ps[k+4] + coef3[-k-2] * ps[k+5] + coef3[-k-3] * ps[k+6];
}
}
}
// 4 channel planar to interleaved
static void interleave_4x4(float* src0, float* src1, float* src2, float* src3, float* dst, int numFrames) {
for (int i = 0; i < numFrames; i++) {
dst[4*i+0] = src0[i];
dst[4*i+1] = src1[i];
dst[4*i+2] = src2[i];
dst[4*i+3] = src3[i];
}
}
// 4 channels (interleaved)
static void biquad_4x4(float* src, float* dst, float coef[5][4], float state[2][4], int numFrames) {
// channel 0
float w10 = state[0][0];
float w20 = state[1][0];
float b00 = coef[0][0];
float b10 = coef[1][0];
float b20 = coef[2][0];
float a10 = coef[3][0];
float a20 = coef[4][0];
// channel 1
float w11 = state[0][1];
float w21 = state[1][1];
float b01 = coef[0][1];
float b11 = coef[1][1];
float b21 = coef[2][1];
float a11 = coef[3][1];
float a21 = coef[4][1];
// channel 2
float w12 = state[0][2];
float w22 = state[1][2];
float b02 = coef[0][2];
float b12 = coef[1][2];
float b22 = coef[2][2];
float a12 = coef[3][2];
float a22 = coef[4][2];
// channel 3
float w13 = state[0][3];
float w23 = state[1][3];
float b03 = coef[0][3];
float b13 = coef[1][3];
float b23 = coef[2][3];
float a13 = coef[3][3];
float a23 = coef[4][3];
for (int i = 0; i < numFrames; i++) {
float x00 = src[4*i+0] + 1.0e-20f; // prevent denormals
float x01 = src[4*i+1] + 1.0e-20f;
float x02 = src[4*i+2] + 1.0e-20f;
float x03 = src[4*i+3] + 1.0e-20f;
float y00, y01, y02, y03;
// transposed Direct Form II
y00 = b00 * x00 + w10;
w10 = b10 * x00 - a10 * y00 + w20;
w20 = b20 * x00 - a20 * y00;
y01 = b01 * x01 + w11;
w11 = b11 * x01 - a11 * y01 + w21;
w21 = b21 * x01 - a21 * y01;
y02 = b02 * x02 + w12;
w12 = b12 * x02 - a12 * y02 + w22;
w22 = b22 * x02 - a22 * y02;
y03 = b03 * x03 + w13;
w13 = b13 * x03 - a13 * y03 + w23;
w23 = b23 * x03 - a23 * y03;
dst[4*i+0] = y00;
dst[4*i+1] = y01;
dst[4*i+2] = y02;
dst[4*i+3] = y03;
}
// save state
state[0][0] = w10;
state[1][0] = w20;
state[0][1] = w11;
state[1][1] = w21;
state[0][2] = w12;
state[1][2] = w22;
state[0][3] = w13;
state[1][3] = w23;
}
// crossfade 4 inputs into 2 outputs with accumulation (interleaved)
static void crossfade_4x2(float* src, float* dst, const float* win, int numFrames) {
for (int i = 0; i < numFrames; i++) {
float frac = win[i];
dst[2*i+0] += src[4*i+2] + frac * (src[4*i+0] - src[4*i+2]);
dst[2*i+1] += src[4*i+3] + frac * (src[4*i+1] - src[4*i+3]);
}
}
// linear interpolation with gain
static void interpolate(float* dst, const float* src0, const float* src1, float frac, float gain) {
float f0 = HRTF_GAIN * gain * (1.0f - frac);
float f1 = HRTF_GAIN * gain * frac;
for (int k = 0; k < HRTF_TAPS; k++) {
dst[k] = f0 * src0[k] + f1 * src1[k];
}
}
#endif
// design a 2nd order Thiran allpass
static void ThiranBiquad(float f, float& b0, float& b1, float& b2, float& a1, float& a2) {
a1 = -2.0f * (f - 2.0f) / (f + 1.0f);
a2 = ((f - 1.0f) * (f - 2.0f)) / ((f + 1.0f) * (f + 2.0f));
b0 = a2;
b1 = a1;
b2 = 1.0f;
}
// compute new filters for a given azimuth and gain
static void setAzimuthAndGain(float firCoef[4][HRTF_TAPS], float bqCoef[5][4], int delay[4],
int index, float azimuth, float gain, int channel) {
// convert from radians to table units
//azimuth *= HRTF_AZIMUTHS / (2.0f * M_PI);
// convert from degrees to table units
azimuth *= HRTF_AZIMUTHS / 360.0f;
// wrap to principal value
while (azimuth < 0.0f) {
azimuth += HRTF_AZIMUTHS;
}
while (azimuth >= HRTF_AZIMUTHS) {
azimuth -= HRTF_AZIMUTHS;
}
// table parameters
int az0 = (int)azimuth;
int az1 = (az0 + 1) % HRTF_AZIMUTHS;
float frac = azimuth - (float)az0;
assert((az0 >= 0) && (az0 < HRTF_AZIMUTHS));
assert((az1 >= 0) && (az1 < HRTF_AZIMUTHS));
assert((frac >= 0.0f) && (frac < 1.0f));
// interpolate FIR
interpolate(firCoef[channel+0], ir_table_table[index][az0][0], ir_table_table[index][az1][0], frac, gain);
interpolate(firCoef[channel+1], ir_table_table[index][az0][1], ir_table_table[index][az1][1], frac, gain);
// interpolate ITD
float itd = (1.0f - frac) * itd_table_table[index][az0] + frac * itd_table_table[index][az1];
// split ITD into integer and fractional delay
int itdi = (int)fabsf(itd);
float itdf = fabsf(itd) - (float)itdi;
assert(itdi <= HRTF_DELAY);
assert(itdf <= 1.0f);
//
// Compute a 2nd-order Thiran allpass for the fractional delay.
// With nominal delay of 2, the active range of [2.0, 3.0] results
// in group delay flat to 1.5KHz and fast transient settling time.
//
float b0, b1, b2, a1, a2;
ThiranBiquad(2.0f + itdf, b0, b1, b2, a1, a2);
// positive ITD means left channel is delayed
if (itd >= 0.0f) {
// left (contralateral) = 2 + itdi + itdf
bqCoef[0][channel+0] = b0;
bqCoef[1][channel+0] = b1;
bqCoef[2][channel+0] = b2;
bqCoef[3][channel+0] = a1;
bqCoef[4][channel+0] = a2;
delay[channel+0] = itdi;
// right (ipsilateral) = 2
bqCoef[0][channel+1] = 0.0f;
bqCoef[1][channel+1] = 0.0f;
bqCoef[2][channel+1] = 1.0f;
bqCoef[3][channel+1] = 0.0f;
bqCoef[4][channel+1] = 0.0f;
delay[channel+1] = 0;
} else {
// left (ipsilateral) = 2
bqCoef[0][channel+0] = 0.0f;
bqCoef[1][channel+0] = 0.0f;
bqCoef[2][channel+0] = 1.0f;
bqCoef[3][channel+0] = 0.0f;
bqCoef[4][channel+0] = 0.0f;
delay[channel+0] = 0;
// right (contralateral) = 2 + itdi + itdf
bqCoef[0][channel+1] = b0;
bqCoef[1][channel+1] = b1;
bqCoef[2][channel+1] = b2;
bqCoef[3][channel+1] = a1;
bqCoef[4][channel+1] = a2;
delay[channel+1] = itdi;
}
}
void AudioHRTF::render(int16_t* input, float* output, int index, float azimuth, float gain, int numFrames) {
assert(index >= 0);
assert(index < HRTF_TABLES);
assert(numFrames == HRTF_BLOCK);
float in[HRTF_TAPS + HRTF_BLOCK]; // mono
float firCoef[4][HRTF_TAPS]; // 4-channel
float firBuffer[4][HRTF_DELAY + HRTF_BLOCK]; // 4-channel
float bqCoef[5][4]; // 4-channel (interleaved)
float bqBuffer[4 * HRTF_BLOCK]; // 4-channel (interleaved)
int delay[4]; // 4-channel (interleaved)
// to avoid polluting the cache, old filters are recomputed instead of stored
setAzimuthAndGain(firCoef, bqCoef, delay, index, _azimuthState, _gainState, L0);
// compute new filters
setAzimuthAndGain(firCoef, bqCoef, delay, index, azimuth, gain, L1);
// new parameters become old
_azimuthState = azimuth;
_gainState = gain;
// convert mono input to float
for (int i = 0; i < HRTF_BLOCK; i++) {
in[HRTF_TAPS+i] = (float)input[i] * (1/32768.0f);
}
// FIR state update
memcpy(in, _firState, HRTF_TAPS * sizeof(float));
memcpy(_firState, &in[HRTF_BLOCK], HRTF_TAPS * sizeof(float));
// process old/new FIR
FIR_1x4(&in[HRTF_TAPS],
&firBuffer[L0][HRTF_DELAY],
&firBuffer[R0][HRTF_DELAY],
&firBuffer[L1][HRTF_DELAY],
&firBuffer[R1][HRTF_DELAY],
firCoef, HRTF_BLOCK);
// delay state update
memcpy(firBuffer[L0], _delayState[L0], HRTF_DELAY * sizeof(float));
memcpy(firBuffer[R0], _delayState[R0], HRTF_DELAY * sizeof(float));
memcpy(firBuffer[L1], _delayState[L1], HRTF_DELAY * sizeof(float));
memcpy(firBuffer[R1], _delayState[R1], HRTF_DELAY * sizeof(float));
memcpy(_delayState[L0], &firBuffer[L1][HRTF_BLOCK], HRTF_DELAY * sizeof(float)); // new state becomes old
memcpy(_delayState[R0], &firBuffer[R1][HRTF_BLOCK], HRTF_DELAY * sizeof(float)); // new state becomes old
memcpy(_delayState[L1], &firBuffer[L1][HRTF_BLOCK], HRTF_DELAY * sizeof(float));
memcpy(_delayState[R1], &firBuffer[R1][HRTF_BLOCK], HRTF_DELAY * sizeof(float));
// interleave with old/new integer delay
interleave_4x4(&firBuffer[L0][HRTF_DELAY] - delay[L0],
&firBuffer[R0][HRTF_DELAY] - delay[R0],
&firBuffer[L1][HRTF_DELAY] - delay[L1],
&firBuffer[R1][HRTF_DELAY] - delay[R1],
bqBuffer, HRTF_BLOCK);
// process old/new fractional delay
biquad_4x4(bqBuffer, bqBuffer, bqCoef, _bqState, HRTF_BLOCK);
// new state becomes old
_bqState[0][L0] = _bqState[0][L1];
_bqState[1][L0] = _bqState[1][L1];
_bqState[0][R0] = _bqState[0][R1];
_bqState[1][R0] = _bqState[1][R1];
// crossfade old/new output and accumulate
crossfade_4x2(bqBuffer, output, crossfadeTable, HRTF_BLOCK);
_silentState = false;
}
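// A plain scalar sketch of the crossfade_4x2() step above (hypothetical reference only; the
// shipped kernel is defined elsewhere). It assumes crossfadeTable holds the weight of the old
// (L0/R0) output and ramps from 1.0 to 0.0 across the block.
static inline void crossfade4x2Sketch(const float* in, float* out, const float* win, int numFrames) {
    for (int i = 0; i < numFrames; i++) {
        float frac = win[i];    // weight of the old filter output
        // crossfade old -> new per ear and accumulate into the interleaved stereo mix
        out[2*i+0] += in[4*i+0] * frac + in[4*i+2] * (1.0f - frac);    // L0 -> L1
        out[2*i+1] += in[4*i+1] * frac + in[4*i+3] * (1.0f - frac);    // R0 -> R1
    }
}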
void AudioHRTF::renderSilent(int16_t* input, float* output, int index, float azimuth, float gain, int numFrames) {
// process the first silent block, to flush internal state
if (!_silentState) {
render(input, output, index, azimuth, gain, numFrames);
}
// new parameters become old
_azimuthState = azimuth;
_gainState = gain;
_silentState = true;
}
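// A scalar sketch of the biquad_4x4() fractional-delay step called in render() above, assuming a
// transposed direct form II realization with two state variables per channel (an assumption for
// illustration; the shipped kernel is defined elsewhere). coef rows are b0, b1, b2, a1, a2.
static inline void biquad4x4Sketch(const float* src, float* dst, float coef[5][4], float state[2][4], int numFrames) {
    for (int i = 0; i < numFrames; i++) {
        for (int ch = 0; ch < 4; ch++) {
            float x = src[4*i + ch];
            float y = coef[0][ch] * x + state[0][ch];
            state[0][ch] = coef[1][ch] * x - coef[3][ch] * y + state[1][ch];
            state[1][ch] = coef[2][ch] * x - coef[4][ch] * y;
            dst[4*i + ch] = y;    // safe in-place: src and dst may alias, as in render()
        }
    }
}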

View file

@ -0,0 +1,74 @@
//
// AudioHRTF.h
// libraries/audio/src
//
// Created by Ken Cooke on 1/17/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioHRTF_h
#define hifi_AudioHRTF_h
#include <stdint.h>
static const int HRTF_AZIMUTHS = 72; // 360 / 5-degree steps
static const int HRTF_TAPS = 64; // minimum-phase FIR coefficients
static const int HRTF_TABLES = 25; // number of HRTF subjects
static const int HRTF_DELAY = 24; // max ITD in samples (1.0ms at 24KHz)
static const int HRTF_BLOCK = 256; // block processing size
static const float HRTF_GAIN = 0.5f; // HRTF global gain adjustment
class AudioHRTF {
public:
//
// input: mono source
// output: interleaved stereo mix buffer (accumulates into existing output)
// index: HRTF subject index
// azimuth: clockwise panning angle [0, 360] in degrees
// gain: gain factor for distance attenuation
// numFrames: must be HRTF_BLOCK in this version
//
void render(int16_t* input, float* output, int index, float azimuth, float gain, int numFrames);
//
// Fast path when input is known to be silent
//
void renderSilent(int16_t* input, float* output, int index, float azimuth, float gain, int numFrames);
private:
// SIMD channel assignments
enum Channel {
L0,
R0,
L1,
R1
};
// For best cache utilization when processing thousands of instances, only
// the minimum persistent state is stored here. No coefs or work buffers.
// FIR history
float _firState[HRTF_TAPS] = {};
// integer delay history
float _delayState[4][HRTF_DELAY] = {};
// fractional delay history
float _bqState[2][4] = {};
// parameter history
float _azimuthState = 0.0f;
float _gainState = 0.0f;
bool _silentState = false;
};
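// Hypothetical usage sketch (illustrative only, not part of the shipped header): mix several mono
// sources into one interleaved stereo block, one AudioHRTF instance per source. The parameter
// layout and names below are assumptions for the example.
inline void mixBlockSketch(AudioHRTF* hrtfs, int16_t** monoInputs, const float* azimuths,
                           const float* gains, int numSources, int subjectIndex, float* stereoMix) {
    // render() accumulates into the output, so start each block from silence
    for (int i = 0; i < 2 * HRTF_BLOCK; i++) {
        stereoMix[i] = 0.0f;
    }
    for (int n = 0; n < numSources; n++) {
        hrtfs[n].render(monoInputs[n], stereoMix, subjectIndex, azimuths[n], gains[n], HRTF_BLOCK);
    }
}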
#endif // hifi_AudioHRTF_h

File diff suppressed because it is too large

View file

@ -258,6 +258,8 @@ void Sound::interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& ou
qCDebug(audio) << "Error reading WAV file";
}
_duration = (float) (outputAudioByteArraySize / (fileHeader.wave.sampleRate * fileHeader.wave.numChannels * fileHeader.wave.bitsPerSample / 8.0f));
} else {
qCDebug(audio) << "Could not read wav audio file header.";
return;

View file

@ -22,12 +22,15 @@ class Sound : public Resource {
Q_OBJECT
Q_PROPERTY(bool downloaded READ isReady)
Q_PROPERTY(float duration READ getDuration)
public:
Sound(const QUrl& url, bool isStereo = false);
bool isStereo() const { return _isStereo; }
bool isReady() const { return _isReady; }
float getDuration() { return _duration; }
const QByteArray& getByteArray() { return _byteArray; }
signals:
@ -37,6 +40,7 @@ private:
QByteArray _byteArray;
bool _isStereo;
bool _isReady;
float _duration; // In seconds
void downSample(const QByteArray& rawAudioByteArray);
void interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray);

View file

@ -0,0 +1,96 @@
//
// AudioHRTF_avx.cpp
// libraries/audio/src/avx
//
// Created by Ken Cooke on 1/17/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || defined(__x86_64__)
#include <assert.h>
#include <immintrin.h>
#include "../AudioHRTF.h"
#ifndef __AVX__
#error Must be compiled with /arch:AVX or -mavx.
#endif
// 1 channel input, 4 channel output
void FIR_1x4_AVX(float* src, float* dst0, float* dst1, float* dst2, float* dst3, float coef[4][HRTF_TAPS], int numFrames) {
float* coef0 = coef[0] + HRTF_TAPS - 1; // process backwards
float* coef1 = coef[1] + HRTF_TAPS - 1;
float* coef2 = coef[2] + HRTF_TAPS - 1;
float* coef3 = coef[3] + HRTF_TAPS - 1;
assert(numFrames % 8 == 0);
for (int i = 0; i < numFrames; i += 8) {
__m256 acc0 = _mm256_setzero_ps();
__m256 acc1 = _mm256_setzero_ps();
__m256 acc2 = _mm256_setzero_ps();
__m256 acc3 = _mm256_setzero_ps();
float* ps = &src[i - HRTF_TAPS + 1]; // process forwards
assert(HRTF_TAPS % 8 == 0);
for (int k = 0; k < HRTF_TAPS; k += 8) {
acc0 = _mm256_add_ps(acc0, _mm256_mul_ps(_mm256_broadcast_ss(&coef0[-k-0]), _mm256_loadu_ps(&ps[k+0])));
acc1 = _mm256_add_ps(acc1, _mm256_mul_ps(_mm256_broadcast_ss(&coef1[-k-0]), _mm256_loadu_ps(&ps[k+0])));
acc2 = _mm256_add_ps(acc2, _mm256_mul_ps(_mm256_broadcast_ss(&coef2[-k-0]), _mm256_loadu_ps(&ps[k+0])));
acc3 = _mm256_add_ps(acc3, _mm256_mul_ps(_mm256_broadcast_ss(&coef3[-k-0]), _mm256_loadu_ps(&ps[k+0])));
acc0 = _mm256_add_ps(acc0, _mm256_mul_ps(_mm256_broadcast_ss(&coef0[-k-1]), _mm256_loadu_ps(&ps[k+1])));
acc1 = _mm256_add_ps(acc1, _mm256_mul_ps(_mm256_broadcast_ss(&coef1[-k-1]), _mm256_loadu_ps(&ps[k+1])));
acc2 = _mm256_add_ps(acc2, _mm256_mul_ps(_mm256_broadcast_ss(&coef2[-k-1]), _mm256_loadu_ps(&ps[k+1])));
acc3 = _mm256_add_ps(acc3, _mm256_mul_ps(_mm256_broadcast_ss(&coef3[-k-1]), _mm256_loadu_ps(&ps[k+1])));
acc0 = _mm256_add_ps(acc0, _mm256_mul_ps(_mm256_broadcast_ss(&coef0[-k-2]), _mm256_loadu_ps(&ps[k+2])));
acc1 = _mm256_add_ps(acc1, _mm256_mul_ps(_mm256_broadcast_ss(&coef1[-k-2]), _mm256_loadu_ps(&ps[k+2])));
acc2 = _mm256_add_ps(acc2, _mm256_mul_ps(_mm256_broadcast_ss(&coef2[-k-2]), _mm256_loadu_ps(&ps[k+2])));
acc3 = _mm256_add_ps(acc3, _mm256_mul_ps(_mm256_broadcast_ss(&coef3[-k-2]), _mm256_loadu_ps(&ps[k+2])));
acc0 = _mm256_add_ps(acc0, _mm256_mul_ps(_mm256_broadcast_ss(&coef0[-k-3]), _mm256_loadu_ps(&ps[k+3])));
acc1 = _mm256_add_ps(acc1, _mm256_mul_ps(_mm256_broadcast_ss(&coef1[-k-3]), _mm256_loadu_ps(&ps[k+3])));
acc2 = _mm256_add_ps(acc2, _mm256_mul_ps(_mm256_broadcast_ss(&coef2[-k-3]), _mm256_loadu_ps(&ps[k+3])));
acc3 = _mm256_add_ps(acc3, _mm256_mul_ps(_mm256_broadcast_ss(&coef3[-k-3]), _mm256_loadu_ps(&ps[k+3])));
acc0 = _mm256_add_ps(acc0, _mm256_mul_ps(_mm256_broadcast_ss(&coef0[-k-4]), _mm256_loadu_ps(&ps[k+4])));
acc1 = _mm256_add_ps(acc1, _mm256_mul_ps(_mm256_broadcast_ss(&coef1[-k-4]), _mm256_loadu_ps(&ps[k+4])));
acc2 = _mm256_add_ps(acc2, _mm256_mul_ps(_mm256_broadcast_ss(&coef2[-k-4]), _mm256_loadu_ps(&ps[k+4])));
acc3 = _mm256_add_ps(acc3, _mm256_mul_ps(_mm256_broadcast_ss(&coef3[-k-4]), _mm256_loadu_ps(&ps[k+4])));
acc0 = _mm256_add_ps(acc0, _mm256_mul_ps(_mm256_broadcast_ss(&coef0[-k-5]), _mm256_loadu_ps(&ps[k+5])));
acc1 = _mm256_add_ps(acc1, _mm256_mul_ps(_mm256_broadcast_ss(&coef1[-k-5]), _mm256_loadu_ps(&ps[k+5])));
acc2 = _mm256_add_ps(acc2, _mm256_mul_ps(_mm256_broadcast_ss(&coef2[-k-5]), _mm256_loadu_ps(&ps[k+5])));
acc3 = _mm256_add_ps(acc3, _mm256_mul_ps(_mm256_broadcast_ss(&coef3[-k-5]), _mm256_loadu_ps(&ps[k+5])));
acc0 = _mm256_add_ps(acc0, _mm256_mul_ps(_mm256_broadcast_ss(&coef0[-k-6]), _mm256_loadu_ps(&ps[k+6])));
acc1 = _mm256_add_ps(acc1, _mm256_mul_ps(_mm256_broadcast_ss(&coef1[-k-6]), _mm256_loadu_ps(&ps[k+6])));
acc2 = _mm256_add_ps(acc2, _mm256_mul_ps(_mm256_broadcast_ss(&coef2[-k-6]), _mm256_loadu_ps(&ps[k+6])));
acc3 = _mm256_add_ps(acc3, _mm256_mul_ps(_mm256_broadcast_ss(&coef3[-k-6]), _mm256_loadu_ps(&ps[k+6])));
acc0 = _mm256_add_ps(acc0, _mm256_mul_ps(_mm256_broadcast_ss(&coef0[-k-7]), _mm256_loadu_ps(&ps[k+7])));
acc1 = _mm256_add_ps(acc1, _mm256_mul_ps(_mm256_broadcast_ss(&coef1[-k-7]), _mm256_loadu_ps(&ps[k+7])));
acc2 = _mm256_add_ps(acc2, _mm256_mul_ps(_mm256_broadcast_ss(&coef2[-k-7]), _mm256_loadu_ps(&ps[k+7])));
acc3 = _mm256_add_ps(acc3, _mm256_mul_ps(_mm256_broadcast_ss(&coef3[-k-7]), _mm256_loadu_ps(&ps[k+7])));
}
_mm256_storeu_ps(&dst0[i], acc0);
_mm256_storeu_ps(&dst1[i], acc1);
_mm256_storeu_ps(&dst2[i], acc2);
_mm256_storeu_ps(&dst3[i], acc3);
}
_mm256_zeroupper();
}
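// For clarity, a plain scalar sketch of what the AVX kernel above computes (hypothetical reference
// only, not part of the shipped file): each output channel is a straight HRTF_TAPS-tap FIR, with
// src[] carrying HRTF_TAPS - 1 samples of history before src[0], exactly as the caller arranges.
static inline void FIR_1x4_refSketch(float* src, float* dst0, float* dst1, float* dst2, float* dst3,
                                     float coef[4][HRTF_TAPS], int numFrames) {
    float* dst[4] = { dst0, dst1, dst2, dst3 };
    for (int i = 0; i < numFrames; i++) {
        for (int ch = 0; ch < 4; ch++) {
            float acc = 0.0f;
            for (int k = 0; k < HRTF_TAPS; k++) {
                acc += coef[ch][k] * src[i - k];    // same convolution the unrolled AVX loops perform
            }
            dst[ch][i] = acc;
        }
    }
}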
#endif

View file

@ -21,11 +21,13 @@
#include <gl/GLWidget.h>
#include <NumericalConstants.h>
#include <DependencyManager.h>
#include <plugins/PluginContainer.h>
#include <gl/Config.h>
#include <gl/GLEscrow.h>
#include <GLMHelpers.h>
#if THREADED_PRESENT
class PresentThread : public QThread, public Dependency {
using Mutex = std::mutex;
using Condition = std::condition_variable;
@ -33,16 +35,25 @@ class PresentThread : public QThread, public Dependency {
public:
PresentThread() {
connect(qApp, &QCoreApplication::aboutToQuit, [this]{
_shutdown = true;
connect(qApp, &QCoreApplication::aboutToQuit, [this] {
shutdown();
});
}
~PresentThread() {
_shutdown = true;
wait();
shutdown();
}
void shutdown() {
if (isRunning()) {
Lock lock(_mutex);
_shutdown = true;
_condition.wait(lock, [&] { return !_shutdown; });
qDebug() << "Present thread shutdown";
}
}
void setNewDisplayPlugin(OpenGLDisplayPlugin* plugin) {
Lock lock(_mutex);
_newPlugin = plugin;
@ -120,6 +131,10 @@ public:
}
_context->doneCurrent();
_context->moveToThread(qApp->thread());
Lock lock(_mutex);
_shutdown = false;
_condition.notify_one();
}
void withMainThreadContext(std::function<void()> f) {
@ -159,16 +174,14 @@ private:
QGLContext* _context { nullptr };
};
#endif
OpenGLDisplayPlugin::OpenGLDisplayPlugin() {
_sceneTextureEscrow.setRecycler([this](GLuint texture){
cleanupForSceneTexture(texture);
_container->releaseSceneTexture(texture);
});
_overlayTextureEscrow.setRecycler([this](GLuint texture) {
_container->releaseOverlayTexture(texture);
});
connect(&_timer, &QTimer::timeout, this, [&] {
#ifdef Q_OS_MAC
// On Mac, QT thread timing is such that we can miss one or even two cycles quite often, giving a render rate (including update/simulate)
@ -191,9 +204,10 @@ void OpenGLDisplayPlugin::cleanupForSceneTexture(uint32_t sceneTexture) {
void OpenGLDisplayPlugin::activate() {
_timer.start(1);
_vsyncSupported = _container->getPrimaryWidget()->isVsyncSupported();
#if THREADED_PRESENT
_timer.start(1);
// Start the present thread if necessary
auto presentThread = DependencyManager::get<PresentThread>();
if (!presentThread) {
@ -208,7 +222,15 @@ void OpenGLDisplayPlugin::activate() {
presentThread->start();
}
presentThread->setNewDisplayPlugin(this);
#else
static auto widget = _container->getPrimaryWidget();
widget->makeCurrent();
customizeContext();
_container->makeRenderingContextCurrent();
#endif
DisplayPlugin::activate();
}
void OpenGLDisplayPlugin::stop() {
@ -216,19 +238,27 @@ void OpenGLDisplayPlugin::stop() {
}
void OpenGLDisplayPlugin::deactivate() {
#if THREADED_PRESENT
{
Lock lock(_mutex);
_deactivateWait.wait(lock, [&]{ return _uncustomized; });
}
_timer.stop();
#else
static auto widget = _container->getPrimaryWidget();
widget->makeCurrent();
uncustomizeContext();
_container->makeRenderingContextCurrent();
#endif
DisplayPlugin::deactivate();
}
void OpenGLDisplayPlugin::customizeContext() {
#if THREADED_PRESENT
_uncustomized = false;
auto presentThread = DependencyManager::get<PresentThread>();
Q_ASSERT(thread() == presentThread->thread());
#endif
enableVsync();
using namespace oglplus;
@ -297,16 +327,23 @@ void OpenGLDisplayPlugin::submitSceneTexture(uint32_t frameIndex, uint32_t scene
// Submit it to the presentation thread via escrow
_sceneTextureEscrow.submit(sceneTexture);
#if THREADED_PRESENT
#else
static auto widget = _container->getPrimaryWidget();
widget->makeCurrent();
present();
_container->makeRenderingContextCurrent();
#endif
}
void OpenGLDisplayPlugin::submitOverlayTexture(GLuint sceneTexture, const glm::uvec2& sceneSize) {
void OpenGLDisplayPlugin::submitOverlayTexture(GLuint overlayTexture, const glm::uvec2& overlaySize) {
// Submit it to the presentation thread via escrow
_overlayTextureEscrow.submit(sceneTexture);
_currentOverlayTexture = overlayTexture;
}
void OpenGLDisplayPlugin::updateTextures() {
_currentSceneTexture = _sceneTextureEscrow.fetchAndRelease(_currentSceneTexture);
_currentOverlayTexture = _overlayTextureEscrow.fetchAndRelease(_currentOverlayTexture);
}
void OpenGLDisplayPlugin::updateFramerate() {
@ -337,6 +374,11 @@ void OpenGLDisplayPlugin::present() {
internalPresent();
updateFramerate();
}
#if THREADED_PRESENT
#else
emit requestRender();
#endif
}
float OpenGLDisplayPlugin::presentRate() {
@ -351,12 +393,8 @@ float OpenGLDisplayPlugin::presentRate() {
}
void OpenGLDisplayPlugin::drawUnitQuad() {
try {
_program->Bind();
_plane->Draw();
} catch (const oglplus::Error& error) {
qWarning() << "The present thread encountered an error writing the scene texture to the output: " << error.what();
}
_program->Bind();
_plane->Draw();
}
void OpenGLDisplayPlugin::enableVsync(bool enable) {
@ -385,9 +423,16 @@ void OpenGLDisplayPlugin::swapBuffers() {
}
void OpenGLDisplayPlugin::withMainThreadContext(std::function<void()> f) const {
#if THREADED_PRESENT
static auto presentThread = DependencyManager::get<PresentThread>();
presentThread->withMainThreadContext(f);
_container->makeRenderingContextCurrent();
#else
static auto widget = _container->getPrimaryWidget();
widget->makeCurrent();
f();
_container->makeRenderingContextCurrent();
#endif
}
QImage OpenGLDisplayPlugin::getScreenshot() const {
@ -399,8 +444,10 @@ QImage OpenGLDisplayPlugin::getScreenshot() const {
return result;
}
#if THREADED_PRESENT
void OpenGLDisplayPlugin::enableDeactivate() {
Lock lock(_mutex);
_uncustomized = true;
_deactivateWait.notify_one();
}
}
#endif

View file

@ -18,6 +18,8 @@
#include <gl/OglplusHelpers.h>
#include <gl/GLEscrow.h>
#define THREADED_PRESENT 1
class OpenGLDisplayPlugin : public DisplayPlugin {
protected:
using Mutex = std::mutex;
@ -45,8 +47,9 @@ public:
virtual QImage getScreenshot() const override;
protected:
#if THREADED_PRESENT
friend class PresentThread;
#endif
virtual glm::uvec2 getSurfaceSize() const = 0;
virtual glm::uvec2 getSurfacePixels() const = 0;
@ -81,15 +84,16 @@ protected:
GLuint _currentSceneTexture { 0 };
GLuint _currentOverlayTexture { 0 };
GLTextureEscrow _overlayTextureEscrow;
GLTextureEscrow _sceneTextureEscrow;
bool _vsyncSupported { false };
private:
#if THREADED_PRESENT
void enableDeactivate();
Condition _deactivateWait;
bool _uncustomized{ false };
#endif
};

View file

@ -511,9 +511,10 @@ void DeferredLightingEffect::render(const render::RenderContextPointer& renderCo
}
}
void DeferredLightingEffect::setupTransparent(RenderArgs* args, int lightBufferUnit) {
void DeferredLightingEffect::setupBatch(gpu::Batch& batch, int lightBufferUnit) {
PerformanceTimer perfTimer("DLE->setupBatch()");
auto globalLight = _allocatedLights[_globalLights.front()];
args->_batch->setUniformBuffer(lightBufferUnit, globalLight->getSchemaBuffer());
batch.setUniformBuffer(lightBufferUnit, globalLight->getSchemaBuffer());
}
static void loadLightProgram(const char* vertSource, const char* fragSource, bool lightVolume, gpu::PipelinePointer& pipeline, LightLocationsPtr& locations) {

View file

@ -46,7 +46,7 @@ public:
void prepare(RenderArgs* args);
void render(const render::RenderContextPointer& renderContext);
void setupTransparent(RenderArgs* args, int lightBufferUnit);
void setupBatch(gpu::Batch& batch, int lightBufferUnit);
// update global lighting
void setAmbientLightMode(int preset);

View file

@ -494,10 +494,22 @@ gpu::Stream::FormatPointer& getInstancedSolidStreamFormat() {
return INSTANCED_SOLID_STREAM_FORMAT;
}
render::ShapePipelinePointer GeometryCache::_simplePipeline;
GeometryCache::GeometryCache() :
_nextID(0)
{
buildShapes();
GeometryCache::_simplePipeline =
std::make_shared<render::ShapePipeline>(getSimplePipeline(), nullptr,
[](const render::ShapePipeline&, gpu::Batch& batch) {
// Set the defaults needed for a simple program
batch.setResourceTexture(render::ShapePipeline::Slot::DIFFUSE_MAP,
DependencyManager::get<TextureCache>()->getWhiteTexture());
batch.setResourceTexture(render::ShapePipeline::Slot::NORMAL_FITTING_MAP,
DependencyManager::get<TextureCache>()->getNormalFittingTexture());
}
);
}
GeometryCache::~GeometryCache() {
@ -536,14 +548,6 @@ void GeometryCache::renderWireShapeInstances(gpu::Batch& batch, Shape shape, siz
_shapes[shape].drawWireInstances(batch, count);
}
void GeometryCache::renderCubeInstances(gpu::Batch& batch, size_t count, gpu::BufferPointer colorBuffer) {
renderShapeInstances(batch, Cube, count, colorBuffer);
}
void GeometryCache::renderWireCubeInstances(gpu::Batch& batch, size_t count, gpu::BufferPointer colorBuffer) {
renderWireShapeInstances(batch, Cube, count, colorBuffer);
}
void GeometryCache::renderCube(gpu::Batch& batch) {
renderShape(batch, Cube);
}
@ -552,10 +556,6 @@ void GeometryCache::renderWireCube(gpu::Batch& batch) {
renderWireShape(batch, Cube);
}
void GeometryCache::renderSphereInstances(gpu::Batch& batch, size_t count, gpu::BufferPointer colorBuffer) {
renderShapeInstances(batch, Sphere, count, colorBuffer);
}
void GeometryCache::renderSphere(gpu::Batch& batch) {
renderShape(batch, Sphere);
}
@ -756,7 +756,9 @@ void GeometryCache::updateVertices(int id, const QVector<glm::vec2>& points, con
#endif // def WANT_DEBUG
}
const int FLOATS_PER_VERTEX = 2;
const int FLOATS_PER_VERTEX = 2 + 3; // vertices + normals
const int NUM_POS_COORDS = 2;
const int VERTEX_NORMAL_OFFSET = NUM_POS_COORDS * sizeof(float);
details.isCreated = true;
details.vertices = points.size();
details.vertexSize = FLOATS_PER_VERTEX;
@ -772,6 +774,7 @@ void GeometryCache::updateVertices(int id, const QVector<glm::vec2>& points, con
details.stream = stream;
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::XYZ), 0);
details.streamFormat->setAttribute(gpu::Stream::NORMAL, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), VERTEX_NORMAL_OFFSET);
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
details.stream->addBuffer(details.verticesBuffer, 0, details.streamFormat->getChannels().at(0)._stride);
@ -791,9 +794,13 @@ void GeometryCache::updateVertices(int id, const QVector<glm::vec2>& points, con
int* colorData = new int[details.vertices];
int* colorDataAt = colorData;
const glm::vec3 NORMAL(0.0f, 0.0f, 1.0f);
foreach (const glm::vec2& point, points) {
*(vertex++) = point.x;
*(vertex++) = point.y;
*(vertex++) = NORMAL.x;
*(vertex++) = NORMAL.y;
*(vertex++) = NORMAL.z;
*(colorDataAt++) = compactColor;
}
@ -817,7 +824,9 @@ void GeometryCache::updateVertices(int id, const QVector<glm::vec3>& points, con
#endif // def WANT_DEBUG
}
const int FLOATS_PER_VERTEX = 3;
const int FLOATS_PER_VERTEX = 3 + 3; // vertices + normals
const int NUM_POS_COORDS = 3;
const int VERTEX_NORMAL_OFFSET = NUM_POS_COORDS * sizeof(float);
details.isCreated = true;
details.vertices = points.size();
details.vertexSize = FLOATS_PER_VERTEX;
@ -833,6 +842,7 @@ void GeometryCache::updateVertices(int id, const QVector<glm::vec3>& points, con
details.stream = stream;
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), 0);
details.streamFormat->setAttribute(gpu::Stream::NORMAL, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), VERTEX_NORMAL_OFFSET);
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
details.stream->addBuffer(details.verticesBuffer, 0, details.streamFormat->getChannels().at(0)._stride);
@ -852,10 +862,14 @@ void GeometryCache::updateVertices(int id, const QVector<glm::vec3>& points, con
int* colorData = new int[details.vertices];
int* colorDataAt = colorData;
const glm::vec3 NORMAL(0.0f, 0.0f, 1.0f);
foreach (const glm::vec3& point, points) {
*(vertex++) = point.x;
*(vertex++) = point.y;
*(vertex++) = point.z;
*(vertex++) = NORMAL.x;
*(vertex++) = NORMAL.y;
*(vertex++) = NORMAL.z;
*(colorDataAt++) = compactColor;
}
@ -880,7 +894,11 @@ void GeometryCache::updateVertices(int id, const QVector<glm::vec3>& points, con
#endif // def WANT_DEBUG
}
const int FLOATS_PER_VERTEX = 5;
const int FLOATS_PER_VERTEX = 3 + 3 + 2; // vertices + normals + tex coords
const int NUM_POS_COORDS = 3;
const int NUM_NORMAL_COORDS = 3;
const int VERTEX_NORMAL_OFFSET = NUM_POS_COORDS * sizeof(float);
const int VERTEX_TEX_OFFSET = VERTEX_NORMAL_OFFSET + NUM_NORMAL_COORDS * sizeof(float);
details.isCreated = true;
details.vertices = points.size();
details.vertexSize = FLOATS_PER_VERTEX;
@ -896,7 +914,8 @@ void GeometryCache::updateVertices(int id, const QVector<glm::vec3>& points, con
details.stream = stream;
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), 0);
details.streamFormat->setAttribute(gpu::Stream::TEXCOORD, 0, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV), 3 * sizeof(float));
details.streamFormat->setAttribute(gpu::Stream::NORMAL, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), VERTEX_NORMAL_OFFSET);
details.streamFormat->setAttribute(gpu::Stream::TEXCOORD, 0, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV), VERTEX_TEX_OFFSET);
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
details.stream->addBuffer(details.verticesBuffer, 0, details.streamFormat->getChannels().at(0)._stride);
@ -918,12 +937,16 @@ void GeometryCache::updateVertices(int id, const QVector<glm::vec3>& points, con
int* colorData = new int[details.vertices];
int* colorDataAt = colorData;
const glm::vec3 NORMAL(0.0f, 0.0f, 1.0f);
for (int i = 0; i < points.size(); i++) {
glm::vec3 point = points[i];
glm::vec2 texCoord = texCoords[i];
*(vertex++) = point.x;
*(vertex++) = point.y;
*(vertex++) = point.z;
*(vertex++) = NORMAL.x;
*(vertex++) = NORMAL.y;
*(vertex++) = NORMAL.z;
*(vertex++) = texCoord.x;
*(vertex++) = texCoord.y;
@ -1073,8 +1096,10 @@ void GeometryCache::renderQuad(gpu::Batch& batch, const glm::vec2& minCorner, co
#endif // def WANT_DEBUG
}
const int FLOATS_PER_VERTEX = 2; // vertices
const int FLOATS_PER_VERTEX = 2 + 3; // vertices + normals
const int VERTICES = 4; // 1 quad = 4 vertices
const int NUM_POS_COORDS = 2;
const int VERTEX_NORMAL_OFFSET = NUM_POS_COORDS * sizeof(float);
if (!details.isCreated) {
@ -1093,17 +1118,19 @@ void GeometryCache::renderQuad(gpu::Batch& batch, const glm::vec2& minCorner, co
details.stream = stream;
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::XYZ), 0);
details.streamFormat->setAttribute(gpu::Stream::NORMAL, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), VERTEX_NORMAL_OFFSET);
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
details.stream->addBuffer(details.verticesBuffer, 0, details.streamFormat->getChannels().at(0)._stride);
details.stream->addBuffer(details.colorBuffer, 0, details.streamFormat->getChannels().at(1)._stride);
const glm::vec3 NORMAL(0.0f, 0.0f, 1.0f);
float vertexBuffer[VERTICES * FLOATS_PER_VERTEX] = {
minCorner.x, minCorner.y,
maxCorner.x, minCorner.y,
minCorner.x, maxCorner.y,
maxCorner.x, maxCorner.y,
minCorner.x, minCorner.y, NORMAL.x, NORMAL.y, NORMAL.z,
maxCorner.x, minCorner.y, NORMAL.x, NORMAL.y, NORMAL.z,
minCorner.x, maxCorner.y, NORMAL.x, NORMAL.y, NORMAL.z,
maxCorner.x, maxCorner.y, NORMAL.x, NORMAL.y, NORMAL.z,
};
const int NUM_COLOR_SCALARS_PER_QUAD = 4;
@ -1158,10 +1185,12 @@ void GeometryCache::renderQuad(gpu::Batch& batch, const glm::vec2& minCorner, co
#endif // def WANT_DEBUG
}
const int FLOATS_PER_VERTEX = 2 * 2; // text coords & vertices
const int FLOATS_PER_VERTEX = 2 + 3 + 2; // vertices + normals + tex coords
const int VERTICES = 4; // 1 quad = 4 vertices
const int NUM_POS_COORDS = 2;
const int VERTEX_TEXCOORD_OFFSET = NUM_POS_COORDS * sizeof(float);
const int NUM_NORMAL_COORDS = 3;
const int VERTEX_NORMAL_OFFSET = NUM_POS_COORDS * sizeof(float);
const int VERTEX_TEXCOORD_OFFSET = VERTEX_NORMAL_OFFSET + NUM_NORMAL_COORDS * sizeof(float);
if (!details.isCreated) {
@ -1181,7 +1210,9 @@ void GeometryCache::renderQuad(gpu::Batch& batch, const glm::vec2& minCorner, co
details.streamFormat = streamFormat;
details.stream = stream;
// zzmp: fix the normal across all renderQuad
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::XYZ), 0);
details.streamFormat->setAttribute(gpu::Stream::NORMAL, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), VERTEX_NORMAL_OFFSET);
details.streamFormat->setAttribute(gpu::Stream::TEXCOORD, 0, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV), VERTEX_TEXCOORD_OFFSET);
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
@ -1189,11 +1220,12 @@ void GeometryCache::renderQuad(gpu::Batch& batch, const glm::vec2& minCorner, co
details.stream->addBuffer(details.colorBuffer, 0, details.streamFormat->getChannels().at(1)._stride);
const glm::vec3 NORMAL(0.0f, 0.0f, 1.0f);
float vertexBuffer[VERTICES * FLOATS_PER_VERTEX] = {
minCorner.x, minCorner.y, texCoordMinCorner.x, texCoordMinCorner.y,
maxCorner.x, minCorner.y, texCoordMaxCorner.x, texCoordMinCorner.y,
minCorner.x, maxCorner.y, texCoordMinCorner.x, texCoordMaxCorner.y,
maxCorner.x, maxCorner.y, texCoordMaxCorner.x, texCoordMaxCorner.y,
minCorner.x, minCorner.y, NORMAL.x, NORMAL.y, NORMAL.z, texCoordMinCorner.x, texCoordMinCorner.y,
maxCorner.x, minCorner.y, NORMAL.x, NORMAL.y, NORMAL.z, texCoordMaxCorner.x, texCoordMinCorner.y,
minCorner.x, maxCorner.y, NORMAL.x, NORMAL.y, NORMAL.z, texCoordMinCorner.x, texCoordMaxCorner.y,
maxCorner.x, maxCorner.y, NORMAL.x, NORMAL.y, NORMAL.z, texCoordMaxCorner.x, texCoordMaxCorner.y,
};
@ -1235,8 +1267,10 @@ void GeometryCache::renderQuad(gpu::Batch& batch, const glm::vec3& minCorner, co
#endif // def WANT_DEBUG
}
const int FLOATS_PER_VERTEX = 3; // vertices
const int FLOATS_PER_VERTEX = 3 + 3; // vertices + normals
const int VERTICES = 4; // 1 quad = 4 vertices
const int NUM_POS_COORDS = 3;
const int VERTEX_NORMAL_OFFSET = NUM_POS_COORDS * sizeof(float);
if (!details.isCreated) {
@ -1257,17 +1291,19 @@ void GeometryCache::renderQuad(gpu::Batch& batch, const glm::vec3& minCorner, co
details.stream = stream;
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), 0);
details.streamFormat->setAttribute(gpu::Stream::NORMAL, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), VERTEX_NORMAL_OFFSET);
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
details.stream->addBuffer(details.verticesBuffer, 0, details.streamFormat->getChannels().at(0)._stride);
details.stream->addBuffer(details.colorBuffer, 0, details.streamFormat->getChannels().at(1)._stride);
const glm::vec3 NORMAL(0.0f, 0.0f, 1.0f);
float vertexBuffer[VERTICES * FLOATS_PER_VERTEX] = {
minCorner.x, minCorner.y, minCorner.z,
maxCorner.x, minCorner.y, minCorner.z,
minCorner.x, maxCorner.y, maxCorner.z,
maxCorner.x, maxCorner.y, maxCorner.z,
minCorner.x, minCorner.y, minCorner.z, NORMAL.x, NORMAL.y, NORMAL.z,
maxCorner.x, minCorner.y, minCorner.z, NORMAL.x, NORMAL.y, NORMAL.z,
minCorner.x, maxCorner.y, maxCorner.z, NORMAL.x, NORMAL.y, NORMAL.z,
maxCorner.x, maxCorner.y, maxCorner.z, NORMAL.x, NORMAL.y, NORMAL.z,
};
const int NUM_COLOR_SCALARS_PER_QUAD = 4;
@ -1327,10 +1363,13 @@ void GeometryCache::renderQuad(gpu::Batch& batch, const glm::vec3& topLeft, cons
#endif // def WANT_DEBUG
}
const int FLOATS_PER_VERTEX = 3 + 2; // 3d vertices + text coords
const int FLOATS_PER_VERTEX = 3 + 3 + 2; // vertices + normals + tex coords
const int VERTICES = 4; // 1 quad = 4 vertices
const int NUM_POS_COORDS = 3;
const int VERTEX_TEXCOORD_OFFSET = NUM_POS_COORDS * sizeof(float);
const int NUM_NORMAL_COORDS = 3;
const int VERTEX_NORMAL_OFFSET = NUM_POS_COORDS * sizeof(float);
const int VERTEX_TEXCOORD_OFFSET = VERTEX_NORMAL_OFFSET + NUM_NORMAL_COORDS * sizeof(float);
if (!details.isCreated) {
@ -1349,6 +1388,7 @@ void GeometryCache::renderQuad(gpu::Batch& batch, const glm::vec3& topLeft, cons
details.stream = stream;
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), 0);
details.streamFormat->setAttribute(gpu::Stream::NORMAL, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), VERTEX_NORMAL_OFFSET);
details.streamFormat->setAttribute(gpu::Stream::TEXCOORD, 0, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV), VERTEX_TEXCOORD_OFFSET);
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
@ -1356,12 +1396,13 @@ void GeometryCache::renderQuad(gpu::Batch& batch, const glm::vec3& topLeft, cons
details.stream->addBuffer(details.colorBuffer, 0, details.streamFormat->getChannels().at(1)._stride);
const glm::vec3 NORMAL(0.0f, 0.0f, 1.0f);
float vertexBuffer[VERTICES * FLOATS_PER_VERTEX] = {
bottomLeft.x, bottomLeft.y, bottomLeft.z, texCoordBottomLeft.x, texCoordBottomLeft.y,
bottomRight.x, bottomRight.y, bottomRight.z, texCoordBottomRight.x, texCoordBottomRight.y,
topLeft.x, topLeft.y, topLeft.z, texCoordTopLeft.x, texCoordTopLeft.y,
topRight.x, topRight.y, topRight.z, texCoordTopRight.x, texCoordTopRight.y,
};
bottomLeft.x, bottomLeft.y, bottomLeft.z, NORMAL.x, NORMAL.y, NORMAL.z, texCoordBottomLeft.x, texCoordBottomLeft.y,
bottomRight.x, bottomRight.y, bottomRight.z, NORMAL.x, NORMAL.y, NORMAL.z, texCoordBottomRight.x, texCoordBottomRight.y,
topLeft.x, topLeft.y, topLeft.z, NORMAL.x, NORMAL.y, NORMAL.z, texCoordTopLeft.x, texCoordTopLeft.y,
topRight.x, topRight.y, topRight.z, NORMAL.x, NORMAL.y, NORMAL.z, texCoordTopRight.x, texCoordTopRight.y,
};
const int NUM_COLOR_SCALARS_PER_QUAD = 4;
int compactColor = ((int(color.x * 255.0f) & 0xFF)) |
@ -1414,7 +1455,9 @@ void GeometryCache::renderDashedLine(gpu::Batch& batch, const glm::vec3& start,
glm::vec3 dashVector = segmentVector / SEGMENT_LENGTH * dash_length;
glm::vec3 gapVector = segmentVector / SEGMENT_LENGTH * gap_length;
const int FLOATS_PER_VERTEX = 3;
const int FLOATS_PER_VERTEX = 3 + 3; // vertices + normals
const int NUM_POS_COORDS = 3;
const int VERTEX_NORMAL_OFFSET = NUM_POS_COORDS * sizeof(float);
details.vertices = (segmentCountFloor + 1) * 2;
details.vertexSize = FLOATS_PER_VERTEX;
details.isCreated = true;
@ -1430,6 +1473,7 @@ void GeometryCache::renderDashedLine(gpu::Batch& batch, const glm::vec3& start,
details.stream = stream;
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), 0);
details.streamFormat->setAttribute(gpu::Stream::NORMAL, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), VERTEX_NORMAL_OFFSET);
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
details.stream->addBuffer(details.verticesBuffer, 0, details.streamFormat->getChannels().at(0)._stride);
@ -1441,10 +1485,14 @@ void GeometryCache::renderDashedLine(gpu::Batch& batch, const glm::vec3& start,
float* vertexData = new float[details.vertices * FLOATS_PER_VERTEX];
float* vertex = vertexData;
const glm::vec3 NORMAL(1.0f, 0.0f, 0.0f);
glm::vec3 point = start;
*(vertex++) = point.x;
*(vertex++) = point.y;
*(vertex++) = point.z;
*(vertex++) = NORMAL.x;
*(vertex++) = NORMAL.y;
*(vertex++) = NORMAL.z;
*(colorDataAt++) = compactColor;
for (int i = 0; i < segmentCountFloor; i++) {
@ -1452,17 +1500,26 @@ void GeometryCache::renderDashedLine(gpu::Batch& batch, const glm::vec3& start,
*(vertex++) = point.x;
*(vertex++) = point.y;
*(vertex++) = point.z;
*(vertex++) = NORMAL.x;
*(vertex++) = NORMAL.y;
*(vertex++) = NORMAL.z;
*(colorDataAt++) = compactColor;
point += gapVector;
*(vertex++) = point.x;
*(vertex++) = point.y;
*(vertex++) = point.z;
*(vertex++) = NORMAL.x;
*(vertex++) = NORMAL.y;
*(vertex++) = NORMAL.z;
*(colorDataAt++) = compactColor;
}
*(vertex++) = end.x;
*(vertex++) = end.y;
*(vertex++) = end.z;
*(vertex++) = NORMAL.x;
*(vertex++) = NORMAL.y;
*(vertex++) = NORMAL.z;
*(colorDataAt++) = compactColor;
details.verticesBuffer->append(sizeof(float) * FLOATS_PER_VERTEX * details.vertices, (gpu::Byte*) vertexData);
@ -1569,7 +1626,9 @@ void GeometryCache::renderLine(gpu::Batch& batch, const glm::vec3& p1, const glm
#endif // def WANT_DEBUG
}
const int FLOATS_PER_VERTEX = 3;
const int FLOATS_PER_VERTEX = 3 + 3; // vertices + normals
const int NUM_POS_COORDS = 3;
const int VERTEX_NORMAL_OFFSET = NUM_POS_COORDS * sizeof(float);
const int vertices = 2;
if (!details.isCreated) {
@ -1588,13 +1647,16 @@ void GeometryCache::renderLine(gpu::Batch& batch, const glm::vec3& p1, const glm
details.stream = stream;
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), 0);
details.streamFormat->setAttribute(gpu::Stream::NORMAL, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), VERTEX_NORMAL_OFFSET);
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
details.stream->addBuffer(details.verticesBuffer, 0, details.streamFormat->getChannels().at(0)._stride);
details.stream->addBuffer(details.colorBuffer, 0, details.streamFormat->getChannels().at(1)._stride);
float vertexBuffer[vertices * FLOATS_PER_VERTEX] = { p1.x, p1.y, p1.z, p2.x, p2.y, p2.z };
const glm::vec3 NORMAL(1.0f, 0.0f, 0.0f);
float vertexBuffer[vertices * FLOATS_PER_VERTEX] = {
p1.x, p1.y, p1.z, NORMAL.x, NORMAL.y, NORMAL.z,
p2.x, p2.y, p2.z, NORMAL.x, NORMAL.y, NORMAL.z};
const int NUM_COLOR_SCALARS = 2;
int colors[NUM_COLOR_SCALARS] = { compactColor1, compactColor2 };
@ -1781,7 +1843,23 @@ inline bool operator==(const SimpleProgramKey& a, const SimpleProgramKey& b) {
return a.getRaw() == b.getRaw();
}
gpu::PipelinePointer GeometryCache::getPipeline(SimpleProgramKey config) {
void GeometryCache::bindSimpleProgram(gpu::Batch& batch, bool textured, bool culled, bool emissive, bool depthBiased) {
batch.setPipeline(getSimplePipeline(textured, culled, emissive, depthBiased));
// If not textured, set a default diffuse map
if (!textured) {
batch.setResourceTexture(render::ShapePipeline::Slot::DIFFUSE_MAP,
DependencyManager::get<TextureCache>()->getWhiteTexture());
}
// Set a default normal map
batch.setResourceTexture(render::ShapePipeline::Slot::NORMAL_FITTING_MAP,
DependencyManager::get<TextureCache>()->getNormalFittingTexture());
}
gpu::PipelinePointer GeometryCache::getSimplePipeline(bool textured, bool culled, bool emissive, bool depthBiased) {
SimpleProgramKey config{textured, culled, emissive, depthBiased};
// Compile the shaders
static std::once_flag once;
std::call_once(once, [&]() {
auto VS = gpu::Shader::createVertex(std::string(simple_vert));
@ -1796,13 +1874,14 @@ gpu::PipelinePointer GeometryCache::getPipeline(SimpleProgramKey config) {
gpu::Shader::makeProgram(*_simpleShader, slotBindings);
gpu::Shader::makeProgram(*_emissiveShader, slotBindings);
});
// If the pipeline already exists, return it
auto it = _simplePrograms.find(config);
if (it != _simplePrograms.end()) {
return it.value();
}
// If the pipeline did not exist, make it
auto state = std::make_shared<gpu::State>();
if (config.isCulled()) {
state->setCullMode(gpu::State::CULL_BACK);
@ -1815,33 +1894,15 @@ gpu::PipelinePointer GeometryCache::getPipeline(SimpleProgramKey config) {
state->setDepthBiasSlopeScale(1.0f);
}
state->setBlendFunction(false,
gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA,
gpu::State::FACTOR_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ONE);
gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA,
gpu::State::FACTOR_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ONE);
gpu::ShaderPointer program = (config.isEmissive()) ? _emissiveShader : _simpleShader;
gpu::PipelinePointer pipeline = gpu::Pipeline::create(program, state);
_simplePrograms.insert(config, pipeline);
return pipeline;
}
gpu::PipelinePointer GeometryCache::bindSimpleProgram(gpu::Batch& batch, bool textured, bool culled,
bool emissive, bool depthBias) {
SimpleProgramKey config{textured, culled, emissive, depthBias};
gpu::PipelinePointer pipeline = getPipeline(config);
batch.setPipeline(pipeline);
gpu::ShaderPointer program = (config.isEmissive()) ? _emissiveShader : _simpleShader;
if (!config.isTextured()) {
// If it is not textured, bind white texture and keep using textured pipeline
batch.setResourceTexture(0, DependencyManager::get<TextureCache>()->getWhiteTexture());
}
batch.setResourceTexture(render::ShapePipeline::Slot::NORMAL_FITTING_MAP,
DependencyManager::get<TextureCache>()->getNormalFittingTexture());
return pipeline;
}
uint32_t toCompactColor(const glm::vec4& color) {
uint32_t compactColor = ((int(color.x * 255.0f) & 0xFF)) |
((int(color.y * 255.0f) & 0xFF) << 8) |
@ -1852,46 +1913,51 @@ uint32_t toCompactColor(const glm::vec4& color) {
static const size_t INSTANCE_COLOR_BUFFER = 0;
template <typename F>
void renderInstances(const std::string& name, gpu::Batch& batch, const glm::vec4& color, F f) {
void renderInstances(const std::string& name, gpu::Batch& batch, const glm::vec4& color, bool isWire,
const render::ShapePipelinePointer& pipeline, GeometryCache::Shape shape) {
// Add pipeline to name
std::string instanceName = name + std::to_string(std::hash<render::ShapePipelinePointer>()(pipeline));
// Add color to named buffer
{
gpu::BufferPointer instanceColorBuffer = batch.getNamedBuffer(name, INSTANCE_COLOR_BUFFER);
gpu::BufferPointer instanceColorBuffer = batch.getNamedBuffer(instanceName, INSTANCE_COLOR_BUFFER);
auto compactColor = toCompactColor(color);
instanceColorBuffer->append(compactColor);
}
batch.setupNamedCalls(name, [f](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {
DependencyManager::get<GeometryCache>()->bindSimpleProgram(batch);
f(batch, data);
// Add call to named buffer
batch.setupNamedCalls(instanceName, [isWire, pipeline, shape](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {
batch.setPipeline(pipeline->pipeline);
pipeline->prepare(batch);
if (isWire) {
DependencyManager::get<GeometryCache>()->renderWireShapeInstances(batch, shape, data.count(), data.buffers[INSTANCE_COLOR_BUFFER]);
} else {
DependencyManager::get<GeometryCache>()->renderShapeInstances(batch, shape, data.count(), data.buffers[INSTANCE_COLOR_BUFFER]);
}
});
}
void GeometryCache::renderSolidSphereInstance(gpu::Batch& batch, const glm::vec4& color) {
void GeometryCache::renderSolidSphereInstance(gpu::Batch& batch, const glm::vec4& color, const render::ShapePipelinePointer& pipeline) {
static const std::string INSTANCE_NAME = __FUNCTION__;
renderInstances(INSTANCE_NAME, batch, color, [](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {
DependencyManager::get<GeometryCache>()->renderShapeInstances(batch, GeometryCache::Sphere, data.count(),
data.buffers[INSTANCE_COLOR_BUFFER]);
});
renderInstances(INSTANCE_NAME, batch, color, false, pipeline, GeometryCache::Sphere);
}
void GeometryCache::renderWireSphereInstance(gpu::Batch& batch, const glm::vec4& color) {
void GeometryCache::renderWireSphereInstance(gpu::Batch& batch, const glm::vec4& color, const render::ShapePipelinePointer& pipeline) {
static const std::string INSTANCE_NAME = __FUNCTION__;
renderInstances(INSTANCE_NAME, batch, color, [](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {
DependencyManager::get<GeometryCache>()->renderWireShapeInstances(batch, GeometryCache::Sphere, data.count(),
data.buffers[INSTANCE_COLOR_BUFFER]);
});
renderInstances(INSTANCE_NAME, batch, color, true, pipeline, GeometryCache::Sphere);
}
// Enable this in a debug build to cause 'box' entities to iterate through all the
// available shape types, both solid and wireframes
//#define DEBUG_SHAPES
void GeometryCache::renderSolidCubeInstance(gpu::Batch& batch, const glm::vec4& color) {
void GeometryCache::renderSolidCubeInstance(gpu::Batch& batch, const glm::vec4& color, const render::ShapePipelinePointer& pipeline) {
static const std::string INSTANCE_NAME = __FUNCTION__;
#ifdef DEBUG_SHAPES
static auto startTime = usecTimestampNow();
renderInstances(INSTANCE_NAME, batch, color, [](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {
renderInstances(INSTANCE_NAME, batch, color, pipeline, [](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {
auto usecs = usecTimestampNow();
usecs -= startTime;
@ -1916,26 +1982,17 @@ void GeometryCache::renderSolidCubeInstance(gpu::Batch& batch, const glm::vec4&
// For the first half second for a given shape, show the wireframe, for the second half, show the solid.
if (fractionalSeconds > 0.5f) {
DependencyManager::get<GeometryCache>()->renderShapeInstances(batch, shape, data.count(),
data.buffers[INSTANCE_COLOR_BUFFER]);
renderInstances(INSTANCE_NAME, batch, color, true, pipeline, shape);
} else {
DependencyManager::get<GeometryCache>()->renderWireShapeInstances(batch, shape, data.count(),
data.buffers[INSTANCE_COLOR_BUFFER]);
renderInstances(INSTANCE_NAME, batch, color, false, pipeline, shape);
}
});
#else
renderInstances(INSTANCE_NAME, batch, color, [](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {
DependencyManager::get<GeometryCache>()->renderCubeInstances(batch, data.count(),
data.buffers[INSTANCE_COLOR_BUFFER]);
});
renderInstances(INSTANCE_NAME, batch, color, false, pipeline, GeometryCache::Cube);
#endif
}
void GeometryCache::renderWireCubeInstance(gpu::Batch& batch, const glm::vec4& color) {
void GeometryCache::renderWireCubeInstance(gpu::Batch& batch, const glm::vec4& color, const render::ShapePipelinePointer& pipeline) {
static const std::string INSTANCE_NAME = __FUNCTION__;
renderInstances(INSTANCE_NAME, batch, color, [](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {
DependencyManager::get<GeometryCache>()->renderWireCubeInstances(batch, data.count(),
data.buffers[INSTANCE_COLOR_BUFFER]);
});
renderInstances(INSTANCE_NAME, batch, color, true, pipeline, GeometryCache::Cube);
}

View file

@ -150,45 +150,55 @@ public:
static const int UNKNOWN_ID;
/// Sets up the state necessary to render static untextured geometry with the simple program.
gpu::PipelinePointer bindSimpleProgram(gpu::Batch& batch, bool textured = false, bool culled = true,
bool emissive = false, bool depthBias = false);
// Bind the pipeline and get the state to render static geometry
void bindSimpleProgram(gpu::Batch& batch, bool textured = false, bool culled = true,
bool emissive = false, bool depthBias = false);
// Get the pipeline to render static geometry
gpu::PipelinePointer getSimplePipeline(bool textured = false, bool culled = true,
bool emissive = false, bool depthBias = false);
render::ShapePipelinePointer getShapePipeline() { return GeometryCache::_simplePipeline; }
void renderSolidSphereInstance(gpu::Batch& batch, const glm::vec4& color);
void renderSolidSphereInstance(gpu::Batch& batch, const glm::vec3& color) {
renderSolidSphereInstance(batch, glm::vec4(color, 1.0));
}
void renderWireSphereInstance(gpu::Batch& batch, const glm::vec4& color);
void renderWireSphereInstance(gpu::Batch& batch, const glm::vec3& color) {
renderWireSphereInstance(batch, glm::vec4(color, 1.0));
}
void renderSolidCubeInstance(gpu::Batch& batch, const glm::vec4& color);
void renderSolidCubeInstance(gpu::Batch& batch, const glm::vec3& color) {
renderSolidCubeInstance(batch, glm::vec4(color, 1.0));
}
void renderWireCubeInstance(gpu::Batch& batch, const glm::vec4& color);
void renderWireCubeInstance(gpu::Batch& batch, const glm::vec3& color) {
renderWireCubeInstance(batch, glm::vec4(color, 1.0));
}
// Static (instanced) geometry
void renderShapeInstances(gpu::Batch& batch, Shape shape, size_t count, gpu::BufferPointer& colorBuffer);
void renderWireShapeInstances(gpu::Batch& batch, Shape shape, size_t count, gpu::BufferPointer& colorBuffer);
void renderSolidSphereInstance(gpu::Batch& batch, const glm::vec4& color,
const render::ShapePipelinePointer& pipeline = _simplePipeline);
void renderSolidSphereInstance(gpu::Batch& batch, const glm::vec3& color,
const render::ShapePipelinePointer& pipeline = _simplePipeline) {
renderSolidSphereInstance(batch, glm::vec4(color, 1.0f), pipeline);
}
void renderWireSphereInstance(gpu::Batch& batch, const glm::vec4& color,
const render::ShapePipelinePointer& pipeline = _simplePipeline);
void renderWireSphereInstance(gpu::Batch& batch, const glm::vec3& color,
const render::ShapePipelinePointer& pipeline = _simplePipeline) {
renderWireSphereInstance(batch, glm::vec4(color, 1.0f), pipeline);
}
void renderSolidCubeInstance(gpu::Batch& batch, const glm::vec4& color,
const render::ShapePipelinePointer& pipeline = _simplePipeline);
void renderSolidCubeInstance(gpu::Batch& batch, const glm::vec3& color,
const render::ShapePipelinePointer& pipeline = _simplePipeline) {
renderSolidCubeInstance(batch, glm::vec4(color, 1.0f), pipeline);
}
void renderWireCubeInstance(gpu::Batch& batch, const glm::vec4& color,
const render::ShapePipelinePointer& pipeline = _simplePipeline);
void renderWireCubeInstance(gpu::Batch& batch, const glm::vec3& color,
const render::ShapePipelinePointer& pipeline = _simplePipeline) {
renderWireCubeInstance(batch, glm::vec4(color, 1.0f), pipeline);
}
// Dynamic geometry
void renderShape(gpu::Batch& batch, Shape shape);
void renderWireShape(gpu::Batch& batch, Shape shape);
size_t getShapeTriangleCount(Shape shape);
void renderCubeInstances(gpu::Batch& batch, size_t count, gpu::BufferPointer colorBuffer);
void renderWireCubeInstances(gpu::Batch& batch, size_t count, gpu::BufferPointer colorBuffer);
void renderCube(gpu::Batch& batch);
void renderWireCube(gpu::Batch& batch);
size_t getCubeTriangleCount();
void renderSphereInstances(gpu::Batch& batch, size_t count, gpu::BufferPointer colorBuffer);
void renderWireSphereInstances(gpu::Batch& batch, size_t count, gpu::BufferPointer colorBuffer);
void renderSphere(gpu::Batch& batch);
void renderWireSphere(gpu::Batch& batch);
size_t getSphereTriangleCount();
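// Hypothetical usage sketch of the API above (illustrative only; this function, `batch`, and the
// color are assumptions, not part of the shipped header): an immediate-mode draw bound through
// bindSimpleProgram(), plus an instanced draw that defaults to the simple ShapePipeline.
void drawDebugShapesSketch(gpu::Batch& batch) {
    auto geometryCache = DependencyManager::get<GeometryCache>();
    // immediate-mode: untextured, backface-culled, non-emissive, no depth bias
    geometryCache->bindSimpleProgram(batch, false, true, false, false);
    geometryCache->renderWireCube(batch);
    // instanced: batched under a named call keyed by the (default) simple ShapePipeline
    geometryCache->renderSolidSphereInstance(batch, glm::vec4(1.0f, 0.0f, 0.0f, 1.0f));
}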
@ -364,11 +374,9 @@ private:
QHash<QUrl, QWeakPointer<NetworkGeometry> > _networkGeometry;
gpu::PipelinePointer getPipeline(SimpleProgramKey config);
gpu::ShaderPointer _simpleShader;
gpu::ShaderPointer _emissiveShader;
static render::ShapePipelinePointer _simplePipeline;
QHash<SimpleProgramKey, gpu::PipelinePointer> _simplePrograms;
};

View file

@ -225,8 +225,6 @@ void MeshPartPayload::render(RenderArgs* args) const {
gpu::Batch& batch = *(args->_batch);
ShapeKey key = getShapeKey();
auto locations = args->_pipeline->locations;
assert(locations);
@ -239,13 +237,6 @@ void MeshPartPayload::render(RenderArgs* args) const {
// apply material properties
bindMaterial(batch, locations);
// TODO: We should be able to do that just in the renderTransparentJob
if (key.isTranslucent() && locations->lightBufferUnit >= 0) {
PerformanceTimer perfTimer("DLE->setupTransparent()");
DependencyManager::get<DeferredLightingEffect>()->setupTransparent(args, locations->lightBufferUnit);
}
if (args) {
args->_details._materialSwitches++;
}
@ -475,8 +466,7 @@ void ModelMeshPartPayload::render(RenderArgs* args) const {
gpu::Batch& batch = *(args->_batch);
ShapeKey key = getShapeKey();
if (!key.isValid()) {
if (!getShapeKey().isValid()) {
return;
}
@ -517,13 +507,6 @@ void ModelMeshPartPayload::render(RenderArgs* args) const {
// apply material properties
bindMaterial(batch, locations);
// TODO: We should be able to do that just in the renderTransparentJob
if (key.isTranslucent() && locations->lightBufferUnit >= 0) {
PerformanceTimer perfTimer("DLE->setupTransparent()");
DependencyManager::get<DeferredLightingEffect>()->setupTransparent(args, locations->lightBufferUnit);
}
if (args) {
args->_details._materialSwitches++;
}

View file

@ -127,7 +127,8 @@ RenderDeferredTask::RenderDeferredTask(CullFunctor cullFunctor) {
addJob<DrawStatus>("DrawStatus", opaques, DrawStatus(statusIconMap));
}
addJob<DrawOverlay3D>("DrawOverlay3D");
addJob<DrawOverlay3D>("DrawOverlay3DOpaque", ItemFilter::Builder::opaqueShape().withLayered());
addJob<DrawOverlay3D>("DrawOverlay3DTransparent", ItemFilter::Builder::transparentShape().withLayered());
addJob<HitEffect>("HitEffect");
@ -180,7 +181,7 @@ void DrawDeferred::run(const SceneContextPointer& sceneContext, const RenderCont
});
}
DrawOverlay3D::DrawOverlay3D() : _shapePlumber{ std::make_shared<ShapePlumber>() } {
DrawOverlay3D::DrawOverlay3D(ItemFilter filter) : _filter{ filter }, _shapePlumber{ std::make_shared<ShapePlumber>() } {
initOverlay3DPipelines(*_shapePlumber);
}
@ -190,7 +191,7 @@ void DrawOverlay3D::run(const SceneContextPointer& sceneContext, const RenderCon
// render backgrounds
auto& scene = sceneContext->_scene;
auto& items = scene->getMasterBucket().at(ItemFilter::Builder::opaqueShape().withLayered());
auto& items = scene->getMasterBucket().at(_filter);
auto config = std::static_pointer_cast<Config>(renderContext->jobConfig);
@ -203,7 +204,6 @@ void DrawOverlay3D::run(const SceneContextPointer& sceneContext, const RenderCon
}
}
config->numItems = (int)inItems.size();
config->numDrawn = (int)inItems.size();
if (!inItems.empty()) {
RenderArgs* args = renderContext->args;

View file

@ -88,14 +88,11 @@ public:
class DrawOverlay3DConfig : public render::Job::Config {
Q_OBJECT
Q_PROPERTY(int numItems READ getNumItems)
Q_PROPERTY(int numDrawn READ getNumDrawn)
Q_PROPERTY(int maxDrawn MEMBER maxDrawn NOTIFY dirty)
public:
int getNumItems() { return numItems; }
int getNumDrawn() { return numDrawn; }
int numItems{ 0 };
int numDrawn{ 0 };
int maxDrawn{ -1 };
signals:
void dirty();
@ -106,12 +103,13 @@ public:
using Config = DrawOverlay3DConfig;
using JobModel = render::Job::Model<DrawOverlay3D, Config>;
DrawOverlay3D();
DrawOverlay3D(render::ItemFilter filter);
void configure(const Config& config) { _maxDrawn = config.maxDrawn; }
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext);
protected:
render::ItemFilter _filter;
render::ShapePlumberPointer _shapePlumber;
int _maxDrawn; // initialized by Config
};

View file

@ -13,6 +13,7 @@
#include <gpu/Context.h>
#include <gpu/StandardShaderLib.h>
#include "DeferredLightingEffect.h"
#include "TextureCache.h"
#include "render/DrawTask.h"
@ -26,6 +27,7 @@
#include "skin_model_normal_map_vert.h"
#include "model_frag.h"
#include "model_emissive_frag.h"
#include "model_shadow_frag.h"
#include "model_normal_map_frag.h"
#include "model_normal_specular_map_frag.h"
@ -35,9 +37,13 @@
#include "model_lightmap_normal_specular_map_frag.h"
#include "model_lightmap_specular_map_frag.h"
#include "model_translucent_frag.h"
#include "model_translucent_emissive_frag.h"
#include "overlay3D_vert.h"
#include "overlay3D_frag.h"
#include "overlay3D_translucent_frag.h"
#include "overlay3D_emissive_frag.h"
#include "overlay3D_translucent_emissive_frag.h"
#include "drawOpaqueStencil_frag.h"
@ -58,22 +64,85 @@ void initStencilPipeline(gpu::PipelinePointer& pipeline) {
pipeline = gpu::Pipeline::create(program, state);
}
void initOverlay3DPipelines(ShapePlumber& plumber) {
auto vs = gpu::Shader::createVertex(std::string(overlay3D_vert));
auto ps = gpu::Shader::createPixel(std::string(overlay3D_frag));
auto program = gpu::Shader::createProgram(vs, ps);
auto opaqueState = std::make_shared<gpu::State>();
opaqueState->setDepthTest(false);
opaqueState->setBlendFunction(false);
plumber.addPipeline(ShapeKey::Filter::Builder().withOpaque(), program, opaqueState);
gpu::BufferView getDefaultMaterialBuffer() {
model::Material::Schema schema;
schema._diffuse = vec3(1.0f);
schema._opacity = 1.0f;
schema._metallic = vec3(0.1f);
schema._gloss = 10.0f;
return gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(model::Material::Schema), (const gpu::Byte*) &schema));
}
void pipelineBatchSetter(const ShapePipeline& pipeline, gpu::Batch& batch) {
if (pipeline.locations->normalFittingMapUnit > -1) {
batch.setResourceTexture(pipeline.locations->normalFittingMapUnit,
DependencyManager::get<TextureCache>()->getNormalFittingTexture());
void batchSetter(const ShapePipeline& pipeline, gpu::Batch& batch) {
// Set a default diffuse map
batch.setResourceTexture(render::ShapePipeline::Slot::DIFFUSE_MAP,
DependencyManager::get<TextureCache>()->getWhiteTexture());
// Set a default normal map
batch.setResourceTexture(render::ShapePipeline::Slot::NORMAL_FITTING_MAP,
DependencyManager::get<TextureCache>()->getNormalFittingTexture());
// Set default coordinates
if (pipeline.locations->texcoordMatrices >= 0) {
static const glm::mat4 TEX_COORDS[2];
batch._glUniformMatrix4fv(pipeline.locations->texcoordMatrices, 2, false, (const float*)&TEX_COORDS);
}
// Set a default material
if (pipeline.locations->materialBufferUnit >= 0) {
static const gpu::BufferView OPAQUE_SCHEMA_BUFFER = getDefaultMaterialBuffer();
batch.setUniformBuffer(ShapePipeline::Slot::MATERIAL_GPU, OPAQUE_SCHEMA_BUFFER);
}
}
void lightBatchSetter(const ShapePipeline& pipeline, gpu::Batch& batch) {
batchSetter(pipeline, batch);
// Set the light
if (pipeline.locations->lightBufferUnit >= 0) {
DependencyManager::get<DeferredLightingEffect>()->setupBatch(batch, pipeline.locations->lightBufferUnit);
}
}
void initOverlay3DPipelines(ShapePlumber& plumber) {
auto vertex = gpu::Shader::createVertex(std::string(overlay3D_vert));
auto pixel = gpu::Shader::createPixel(std::string(overlay3D_frag));
auto pixelTranslucent = gpu::Shader::createPixel(std::string(overlay3D_translucent_frag));
auto pixelEmissive = gpu::Shader::createPixel(std::string(overlay3D_emissive_frag));
auto pixelTranslucentEmissive = gpu::Shader::createPixel(std::string(overlay3D_translucent_emissive_frag));
auto opaqueProgram = gpu::Shader::createProgram(vertex, pixel);
auto translucentProgram = gpu::Shader::createProgram(vertex, pixelTranslucent);
auto emissiveOpaqueProgram = gpu::Shader::createProgram(vertex, pixelEmissive);
auto emissiveTranslucentProgram = gpu::Shader::createProgram(vertex, pixelTranslucentEmissive);
for (int i = 0; i < 8; i++) {
bool isCulled = (i & 1);
bool isBiased = (i & 2);
bool isOpaque = (i & 4);
auto state = std::make_shared<gpu::State>();
state->setDepthTest(false);
state->setCullMode(isCulled ? gpu::State::CULL_BACK : gpu::State::CULL_NONE);
if (isBiased) {
state->setDepthBias(1.0f);
state->setDepthBiasSlopeScale(1.0f);
}
if (isOpaque) {
// Soft edges
state->setBlendFunction(true,
gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA);
} else {
state->setBlendFunction(true,
gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA,
gpu::State::FACTOR_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ONE);
}
ShapeKey::Filter::Builder builder;
isCulled ? builder.withCullFace() : builder.withoutCullFace();
isBiased ? builder.withDepthBias() : builder.withoutDepthBias();
isOpaque ? builder.withOpaque() : builder.withTranslucent();
auto simpleProgram = isOpaque ? opaqueProgram : translucentProgram;
auto emissiveProgram = isOpaque ? emissiveOpaqueProgram : emissiveTranslucentProgram;
plumber.addPipeline(builder.withoutEmissive().build(), simpleProgram, state, &lightBatchSetter);
plumber.addPipeline(builder.withEmissive().build(), emissiveProgram, state, &batchSetter);
}
}
@ -82,31 +151,43 @@ void initDeferredPipelines(render::ShapePlumber& plumber) {
using ShaderPointer = gpu::ShaderPointer;
auto addPipeline = [&plumber](const Key& key, const ShaderPointer& vertexShader, const ShaderPointer& pixelShader) {
auto state = std::make_shared<gpu::State>();
// Cull backface
state->setCullMode(gpu::State::CULL_BACK);
// Z test depends on transparency
state->setDepthTest(true, !key.isTranslucent(), gpu::LESS_EQUAL);
// Blend if transparent
state->setBlendFunction(key.isTranslucent(),
// For transparency, keep the highlight intensity
gpu::State::ONE, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA,
gpu::State::FACTOR_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ONE);
// These keyvalues' pipelines will be added by this lambda in addition to the key passed
assert(!key.isWireFrame());
assert(!key.isDepthBiased());
assert(key.isCullFace());
ShaderPointer program = gpu::Shader::createProgram(vertexShader, pixelShader);
plumber.addPipeline(key, program, state, &pipelineBatchSetter);
// Add a wireframe version
if (!key.isWireFrame()) {
auto wireFrameKey = Key::Builder(key).withWireframe();
auto wireFrameState = std::make_shared<gpu::State>(state->getValues());
for (int i = 0; i < 8; i++) {
bool isCulled = (i & 1);
bool isBiased = (i & 2);
bool isWireframed = (i & 4);
wireFrameState->setFillMode(gpu::State::FILL_LINE);
ShapeKey::Builder builder(key);
auto state = std::make_shared<gpu::State>();
plumber.addPipeline(wireFrameKey, program, wireFrameState, &pipelineBatchSetter);
// Depth test depends on transparency
state->setDepthTest(true, !key.isTranslucent(), gpu::LESS_EQUAL);
state->setBlendFunction(key.isTranslucent(),
gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA,
gpu::State::FACTOR_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ONE);
if (!isCulled) {
builder.withoutCullFace();
}
state->setCullMode(isCulled ? gpu::State::CULL_BACK : gpu::State::CULL_NONE);
if (isWireframed) {
builder.withWireframe();
state->setFillMode(gpu::State::FILL_LINE);
}
if (isBiased) {
builder.withDepthBias();
state->setDepthBias(1.0f);
state->setDepthBiasSlopeScale(1.0f);
}
plumber.addPipeline(builder.build(), program, state,
key.isTranslucent() ? &lightBatchSetter : &batchSetter);
}
};
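For reference, the translucent color blend set in the lambda above (SRC_ALPHA, BLEND_OP_ADD, INV_SRC_ALPHA) is standard over-compositing. A minimal arithmetic sketch, assuming glm and ignoring the separate alpha-channel factor, which this diff does not spell out:
#include <glm/glm.hpp>
// out.rgb = src.a * src.rgb + (1 - src.a) * dst.rgb
glm::vec3 blendTranslucentRGB(const glm::vec4& src, const glm::vec3& dst) {
    return src.a * glm::vec3(src) + (1.0f - src.a) * dst;
}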
@ -122,113 +203,99 @@ void initDeferredPipelines(render::ShapePlumber& plumber) {
// Pixel shaders
auto modelPixel = gpu::Shader::createPixel(std::string(model_frag));
auto modelEmissivePixel = gpu::Shader::createPixel(std::string(model_emissive_frag));
auto modelNormalMapPixel = gpu::Shader::createPixel(std::string(model_normal_map_frag));
auto modelSpecularMapPixel = gpu::Shader::createPixel(std::string(model_specular_map_frag));
auto modelNormalSpecularMapPixel = gpu::Shader::createPixel(std::string(model_normal_specular_map_frag));
auto modelTranslucentPixel = gpu::Shader::createPixel(std::string(model_translucent_frag));
auto modelTranslucentEmissivePixel = gpu::Shader::createPixel(std::string(model_translucent_emissive_frag));
auto modelShadowPixel = gpu::Shader::createPixel(std::string(model_shadow_frag));
auto modelLightmapPixel = gpu::Shader::createPixel(std::string(model_lightmap_frag));
auto modelLightmapNormalMapPixel = gpu::Shader::createPixel(std::string(model_lightmap_normal_map_frag));
auto modelLightmapSpecularMapPixel = gpu::Shader::createPixel(std::string(model_lightmap_specular_map_frag));
auto modelLightmapNormalSpecularMapPixel = gpu::Shader::createPixel(std::string(model_lightmap_normal_specular_map_frag));
// Fill the pipelineLib
// TODO: Refactor this to use a filter
// Opaques
addPipeline(
Key::Builder(),
modelVertex, modelPixel);
addPipeline(
Key::Builder().withEmissive(),
modelVertex, modelEmissivePixel);
addPipeline(
Key::Builder().withTangents(),
modelNormalMapVertex, modelNormalMapPixel);
addPipeline(
Key::Builder().withSpecular(),
modelVertex, modelSpecularMapPixel);
addPipeline(
Key::Builder().withTangents().withSpecular(),
modelNormalMapVertex, modelNormalSpecularMapPixel);
// Translucents
addPipeline(
Key::Builder().withTranslucent(),
modelVertex, modelTranslucentPixel);
// FIXME: Ignore lightmap for translucent mesh parts
addPipeline(
Key::Builder().withTranslucent().withEmissive(),
modelVertex, modelTranslucentEmissivePixel);
addPipeline(
Key::Builder().withTranslucent().withTangents(),
modelNormalMapVertex, modelTranslucentPixel);
addPipeline(
Key::Builder().withTranslucent().withSpecular(),
modelVertex, modelTranslucentPixel);
addPipeline(
Key::Builder().withTranslucent().withTangents().withSpecular(),
modelNormalMapVertex, modelTranslucentPixel);
addPipeline(
// FIXME: Ignore lightmap for translucent mesh parts
Key::Builder().withTranslucent().withLightmap(),
modelVertex, modelTranslucentPixel);
addPipeline(
Key::Builder().withTangents().withTranslucent(),
modelNormalMapVertex, modelTranslucentPixel);
addPipeline(
Key::Builder().withSpecular().withTranslucent(),
modelVertex, modelTranslucentPixel);
addPipeline(
Key::Builder().withTangents().withSpecular().withTranslucent(),
modelNormalMapVertex, modelTranslucentPixel);
// Lightmapped
addPipeline(
Key::Builder().withLightmap(),
modelLightmapVertex, modelLightmapPixel);
addPipeline(
Key::Builder().withLightmap().withTangents(),
modelLightmapNormalMapVertex, modelLightmapNormalMapPixel);
addPipeline(
Key::Builder().withLightmap().withSpecular(),
modelLightmapVertex, modelLightmapSpecularMapPixel);
addPipeline(
Key::Builder().withLightmap().withTangents().withSpecular(),
modelLightmapNormalMapVertex, modelLightmapNormalSpecularMapPixel);
// Skinned
addPipeline(
Key::Builder().withSkinned(),
skinModelVertex, modelPixel);
addPipeline(
Key::Builder().withSkinned().withTangents(),
skinModelNormalMapVertex, modelNormalMapPixel);
addPipeline(
Key::Builder().withSkinned().withSpecular(),
skinModelVertex, modelSpecularMapPixel);
addPipeline(
Key::Builder().withSkinned().withTangents().withSpecular(),
skinModelNormalMapVertex, modelNormalSpecularMapPixel);
// Skinned and Translucent
addPipeline(
Key::Builder().withSkinned().withTranslucent(),
skinModelVertex, modelTranslucentPixel);
addPipeline(
Key::Builder().withSkinned().withTangents().withTranslucent(),
Key::Builder().withSkinned().withTranslucent().withTangents(),
skinModelNormalMapVertex, modelTranslucentPixel);
addPipeline(
Key::Builder().withSkinned().withSpecular().withTranslucent(),
Key::Builder().withSkinned().withTranslucent().withSpecular(),
skinModelVertex, modelTranslucentPixel);
addPipeline(
Key::Builder().withSkinned().withTangents().withSpecular().withTranslucent(),
Key::Builder().withSkinned().withTranslucent().withTangents().withSpecular(),
skinModelNormalMapVertex, modelTranslucentPixel);
// Depth-only
addPipeline(
Key::Builder().withDepthOnly(),
modelShadowVertex, modelShadowPixel);
addPipeline(
Key::Builder().withSkinned().withDepthOnly(),
skinModelShadowVertex, modelShadowPixel);
}
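initDeferredPipelines therefore ends up with one entry per supported key; the lookup pattern it relies on can be sketched generically as a map from flag bits to a pipeline. Names here are illustrative only, not the hifi API:
#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>
enum MaterialFlag : uint32_t { TRANSLUCENT = 1 << 0, TANGENTS = 1 << 1, SKINNED = 1 << 2 };
int main() {
    // Register once per flag combination, then pick by the same key at draw time.
    std::unordered_map<uint32_t, std::string> pipelines;
    pipelines[0] = "opaque";
    pipelines[TRANSLUCENT] = "translucent";
    pipelines[TRANSLUCENT | TANGENTS] = "translucent + normal map";
    uint32_t key = TRANSLUCENT | TANGENTS;
    auto it = pipelines.find(key);
    std::cout << (it != pipelines.end() ? it->second : "<fallback>") << std::endl;
    return 0;
}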

View file

@ -72,12 +72,12 @@ float TextRenderer3D::getFontSize() const {
}
void TextRenderer3D::draw(gpu::Batch& batch, float x, float y, const QString& str, const glm::vec4& color,
const glm::vec2& bounds) {
const glm::vec2& bounds, bool layered) {
// The font does all the OpenGL work
if (_font) {
// Cache color so that the pointer stays valid.
_color = color;
_font->drawString(batch, x, y, str, &_color, _effectType, bounds);
_font->drawString(batch, x, y, str, &_color, _effectType, bounds, layered);
}
}

View file

@ -40,7 +40,7 @@ public:
float getFontSize() const; // Pixel size
void draw(gpu::Batch& batch, float x, float y, const QString& str, const glm::vec4& color = glm::vec4(1.0f),
const glm::vec2& bounds = glm::vec2(-1.0f));
const glm::vec2& bounds = glm::vec2(-1.0f), bool layered = false);
private:
TextRenderer3D(const char* family, float pointSize, int weight = -1, bool italic = false,

View file

@ -1,7 +1,7 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
// model.vert
// model.slv
// vertex shader
//
// Created by Andrzej Kapolka on 10/14/13.
@ -17,19 +17,18 @@
<$declareStandardTransform()$>
const int MAX_TEXCOORDS = 2;
uniform mat4 texcoordMatrices[MAX_TEXCOORDS];
out vec3 _color;
out float _alpha;
out vec2 _texCoord0;
out vec4 _position;
out vec3 _normal;
out vec3 _color;
out vec2 _texCoord0;
void main(void) {
// pass along the diffuse color in linear space
_color = colorToLinearRGB(inColor.xyz);
_alpha = inColor.w;
// and the texture coordinates
_texCoord0 = (texcoordMatrices[0] * vec4(inTexCoord0.st, 0.0, 1.0)).st;
// standard transform

View file

@ -0,0 +1,38 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
//
// model_emissive.frag
// fragment shader
//
// Created by Zach Pomerantz on 2/3/2016.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include DeferredBufferWrite.slh@>
<@include model/Material.slh@>
uniform sampler2D diffuseMap;
in vec2 _texCoord0;
in vec3 _normal;
in vec3 _color;
in float _alpha;
void main(void) {
vec4 texel = texture(diffuseMap, _texCoord0);
Material mat = getMaterial();
vec3 fragColor = getMaterialDiffuse(mat) * texel.rgb * _color;
packDeferredFragmentLightmap(
normalize(_normal),
texel.a,
vec3(1.0),
getMaterialSpecular(mat),
getMaterialShininess(mat),
fragColor);
}

View file

@ -11,39 +11,14 @@
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include model/Material.slh@>
// Everything about global lighting
<@include DeferredLighting.slh@>
<@include gpu/Transform.slh@>
<$declareStandardCameraTransform()$>
// Everything about light
<@include model/Light.slh@>
// The view Matrix
//uniform mat4 invViewMat;
vec4 evalNormalColor(vec3 dir, float opacity) {
bool isX = (abs(dir.x) > 0.99);
bool isY = (abs(dir.y) > 0.99);
bool isZ = (abs(dir.z) > 0.99);
if (isX || isY || isZ) {
bool negX = (dir.x < -0.995);
bool negY = (dir.y < -0.995);
bool negZ = (dir.z < -0.995);
if (negX || negY || negZ) {
return vec4(float(isX), float(isY), float(isZ), 0.2);
} else {
return vec4(float(isX), float(isY), float(isZ), 1.0);
}
}
return vec4(0.5 * dir + vec3(0.5), opacity);
}
<@include gpu/Transform.slh@>
<$declareStandardCameraTransform()$>
vec4 evalGlobalColor(float shadowAttenuation, vec3 position, vec3 normal, vec3 diffuse, vec3 specular, float gloss, float opacity) {
@ -60,42 +35,37 @@ vec4 evalGlobalColor(float shadowAttenuation, vec3 position, vec3 normal, vec3 d
vec4 shading = evalFragShading(fragNormal, -getLightDirection(light), fragEyeDir, specular, gloss);
color += vec3(opacity * diffuse + shading.rgb) * shading.w * shadowAttenuation * getLightColor(light) * getLightIntensity(light);
color += vec3(diffuse * shading.w * opacity + shading.rgb) * shadowAttenuation * getLightColor(light) * getLightIntensity(light);
//return vec4(color, opacity);
return vec4(color, opacity);
//return vec4(diffuse.rgb, opacity);
//return evalNormalColor(fragEyeDir, opacity);
}
// the diffuse texture
uniform sampler2D diffuseMap;
in vec4 _position;
in vec2 _texCoord0;
in vec4 _position;
in vec3 _normal;
in vec3 _color;
in float _alpha;
out vec4 _fragColor;
void main(void) {
vec3 fragPosition = _position.xyz;
// Fetch diffuse map
vec4 diffuse = texture(diffuseMap, _texCoord0);
Material mat = getMaterial();
vec3 fragPosition = _position.xyz;
vec3 fragNormal = normalize(_normal);
float fragOpacity = getMaterialOpacity(mat) * diffuse.a;
vec3 fragDiffuse = getMaterialDiffuse(mat) * diffuse.rgb * _color;
vec3 fragSpecular = getMaterialSpecular(mat);
float fragGloss = getMaterialShininess(mat);
float fragGloss = getMaterialShininess(mat) / 128;
float fragOpacity = getMaterialOpacity(mat) * diffuse.a * _alpha;
_fragColor = evalGlobalColor(1.0,
fragPosition,
fragNormal,
fragDiffuse,
fragSpecular,
fragGloss,
fragOpacity);
fragPosition,
fragNormal,
fragDiffuse,
fragSpecular,
fragGloss,
fragOpacity);
}

View file

@ -0,0 +1,33 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
//
// model_translucent_emissive.frag
// fragment shader
//
// Created by Zach Pomerantz on 2/3/2016.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include model/Material.slh@>
uniform sampler2D diffuseMap;
in vec2 _texCoord0;
in vec3 _color;
in float _alpha;
out vec4 _fragColor;
void main(void) {
vec4 diffuse = texture(diffuseMap, _texCoord0);
Material mat = getMaterial();
vec3 fragColor = getMaterialDiffuse(mat) * diffuse.rgb * _color;
float fragOpacity = getMaterialOpacity(mat) * diffuse.a * _alpha;
_fragColor = vec4(fragColor, fragOpacity);
}

View file

@ -1,7 +1,7 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
// model.frag
// overlay3D.slf
// fragment shader
//
// Created by Sam Gateau on 6/16/15.
@ -11,20 +11,64 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
uniform sampler2D diffuseMap;
<@include DeferredLighting.slh@>
<@include model/Light.slh@>
in vec2 varTexcoord;
<@include gpu/Transform.slh@>
<$declareStandardCameraTransform()$>
in vec3 varEyeNormal;
vec4 evalGlobalColor(float shadowAttenuation, vec3 position, vec3 normal, vec3 diffuse, vec3 specular, float gloss, float opacity) {
in vec4 varColor;
out vec4 outFragColor;
// Need the light now
Light light = getLight();
TransformCamera cam = getTransformCamera();
vec3 fragNormal;
<$transformEyeToWorldDir(cam, normal, fragNormal)$>
vec3 fragEyeVectorView = normalize(-position);
vec3 fragEyeDir;
<$transformEyeToWorldDir(cam, fragEyeVectorView, fragEyeDir)$>
vec3 color = opacity * diffuse.rgb * getLightColor(light) * getLightAmbientIntensity(light);
vec4 shading = evalFragShading(fragNormal, -getLightDirection(light), fragEyeDir, specular, gloss);
color += vec3(diffuse * shading.w * opacity + shading.rgb) * shadowAttenuation * getLightColor(light) * getLightIntensity(light);
return vec4(color, opacity);
}
uniform sampler2D originalTexture;
in vec2 _texCoord0;
in vec4 _position;
in vec3 _normal;
in vec3 _color;
in float _alpha;
out vec4 _fragColor;
void main(void) {
vec4 diffuse = texture(diffuseMap, varTexcoord.st);
if (diffuse.a < 0.5) {
vec4 diffuse = texture(originalTexture, _texCoord0);
vec3 fragPosition = _position.xyz;
vec3 fragNormal = normalize(_normal);
vec3 fragDiffuse = diffuse.rgb * _color;
vec3 fragSpecular = vec3(0.1);
float fragGloss = 10.0 / 128.0;
float fragOpacity = diffuse.a;
if (fragOpacity <= 0.1) {
discard;
}
outFragColor = vec4(varColor * diffuse);
vec4 color = evalGlobalColor(1.0,
fragPosition,
fragNormal,
fragDiffuse,
fragSpecular,
fragGloss,
fragOpacity);
// Apply standard tone mapping
_fragColor = vec4(pow(color.xyz, vec3(1.0 / 2.2)), color.w);
}

View file

@ -2,6 +2,7 @@
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
// overlay3D.slv
// vertex shader
//
// Created by Sam Gateau on 6/16/15.
// Copyright 2015 High Fidelity, Inc.
@ -15,25 +16,21 @@
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
out vec2 varTexcoord;
// interpolated eye position
out vec4 varEyePosition;
// the interpolated normal
out vec3 varEyeNormal;
out vec4 varColor;
out vec3 _color;
out float _alpha;
out vec2 _texCoord0;
out vec4 _position;
out vec3 _normal;
void main(void) {
varTexcoord = inTexCoord0.xy;
_color = colorToLinearRGB(inColor.xyz);
_alpha = inColor.w;
// pass along the color
varColor = colorToLinearRGBA(inColor);
_texCoord0 = inTexCoord0.st;
// standard transform
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToEyeAndClipPos(cam, obj, inPosition, varEyePosition, gl_Position)$>
<$transformModelToEyeDir(cam, obj, inNormal.xyz, varEyeNormal.xyz)$>
<$transformModelToEyeAndClipPos(cam, obj, inPosition, _position, gl_Position)$>
<$transformModelToEyeDir(cam, obj, inNormal.xyz, _normal)$>
}

View file

@ -0,0 +1,32 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
//
// overlay3D_emissive.frag
// fragment shader
//
// Created by Zach Pomerantz on 2/2/2016.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
uniform sampler2D originalTexture;
in vec2 _texCoord0;
in vec3 _color;
out vec4 _fragColor;
void main(void) {
vec4 diffuse = texture(originalTexture, _texCoord0);
if (diffuse.a <= 0.1) {
discard;
}
vec4 color = vec4(diffuse.rgb * _color, diffuse.a);
// Apply standard tone mapping
_fragColor = vec4(pow(color.xyz, vec3(1.0 / 2.2)), color.w);
}

View file

@ -0,0 +1,71 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
//
// overlay3D_translucent.slf
// fragment shader
//
// Created by Sam Gateau on 6/16/15.
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include DeferredLighting.slh@>
<@include model/Light.slh@>
<@include gpu/Transform.slh@>
<$declareStandardCameraTransform()$>
vec4 evalGlobalColor(float shadowAttenuation, vec3 position, vec3 normal, vec3 diffuse, vec3 specular, float gloss, float opacity) {
// Need the light now
Light light = getLight();
TransformCamera cam = getTransformCamera();
vec3 fragNormal;
<$transformEyeToWorldDir(cam, normal, fragNormal)$>
vec3 fragEyeVectorView = normalize(-position);
vec3 fragEyeDir;
<$transformEyeToWorldDir(cam, fragEyeVectorView, fragEyeDir)$>
vec3 color = opacity * diffuse.rgb * getLightColor(light) * getLightAmbientIntensity(light);
vec4 shading = evalFragShading(fragNormal, -getLightDirection(light), fragEyeDir, specular, gloss);
color += vec3(diffuse * shading.w * opacity + shading.rgb) * shadowAttenuation * getLightColor(light) * getLightIntensity(light);
return vec4(color, opacity);
}
uniform sampler2D originalTexture;
in vec2 _texCoord0;
in vec4 _position;
in vec3 _normal;
in vec3 _color;
in float _alpha;
out vec4 _fragColor;
void main(void) {
vec4 diffuse = texture(originalTexture, _texCoord0);
vec3 fragPosition = _position.xyz;
vec3 fragNormal = normalize(_normal);
vec3 fragDiffuse = diffuse.rgb * _color;
vec3 fragSpecular = vec3(0.1);
float fragGloss = 10.0 / 128.0;
float fragOpacity = diffuse.a * _alpha;
vec4 color = evalGlobalColor(1.0,
fragPosition,
fragNormal,
fragDiffuse,
fragSpecular,
fragGloss,
fragOpacity);
// Apply standard tone mapping
_fragColor = vec4(pow(color.xyz, vec3(1.0 / 2.2)), color.w);
}
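The pow(color.xyz, vec3(1.0 / 2.2)) step used by these overlay fragment shaders is a plain gamma encode of linear RGB. A hedged C++ equivalent, assuming glm:
#include <cmath>
#include <glm/glm.hpp>
glm::vec3 gammaEncode(const glm::vec3& linearRgb) {
    const float invGamma = 1.0f / 2.2f; // matches the exponent in the shaders above
    return glm::vec3(std::pow(linearRgb.r, invGamma),
                     std::pow(linearRgb.g, invGamma),
                     std::pow(linearRgb.b, invGamma));
}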

View file

@ -0,0 +1,27 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
//
// overlay3D_translucent_emissive.frag
// fragment shader
//
// Created by Zach Pomerantz on 2/2/2016.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
uniform sampler2D originalTexture;
in vec2 _texCoord0;
in vec3 _color;
in float _alpha;
out vec4 _fragColor;
void main(void) {
vec4 diffuse = texture(originalTexture, _texCoord0);
_fragColor = vec4(diffuse.rgb * _color, diffuse.a * _alpha);
}

View file

@ -234,6 +234,10 @@ void Font::setupGPU() {
gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA,
gpu::State::FACTOR_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ONE);
_pipeline = gpu::Pipeline::create(program, state);
auto layeredState = std::make_shared<gpu::State>(state->getValues());
layeredState->setDepthTest(false);
_layeredPipeline = gpu::Pipeline::create(program, layeredState);
}
// Sanity checks
@ -336,7 +340,7 @@ void Font::rebuildVertices(float x, float y, const QString& str, const glm::vec2
}
void Font::drawString(gpu::Batch& batch, float x, float y, const QString& str, const glm::vec4* color,
EffectType effectType, const glm::vec2& bounds) {
EffectType effectType, const glm::vec2& bounds, bool layered) {
if (str == "") {
return;
}
@ -347,7 +351,7 @@ void Font::drawString(gpu::Batch& batch, float x, float y, const QString& str, c
setupGPU();
batch.setPipeline(_pipeline);
batch.setPipeline(layered ? _layeredPipeline : _pipeline);
batch.setResourceTexture(_fontLoc, _texture);
batch._glUniform1i(_outlineLoc, (effectType == OUTLINE_EFFECT));
batch._glUniform4fv(_colorLoc, 1, (const float*)color);

View file

@ -27,7 +27,7 @@ public:
// Render string to batch
void drawString(gpu::Batch& batch, float x, float y, const QString& str,
const glm::vec4* color, EffectType effectType,
const glm::vec2& bound);
const glm::vec2& bound, bool layered = false);
static Font* load(QIODevice& fontFile);
static Font* load(const QString& family);
@ -61,6 +61,7 @@ private:
// gpu structures
gpu::PipelinePointer _pipeline;
gpu::PipelinePointer _layeredPipeline;
gpu::TexturePointer _texture;
gpu::Stream::FormatPointer _format;
gpu::BufferPointer _verticesBuffer;

View file

@ -38,6 +38,7 @@ void renderShape(RenderArgs* args, const ShapePlumberPointer& shapeContext, cons
if (args->_pipeline) {
item.render(args);
}
args->_pipeline = nullptr;
} else if (key.hasOwnPipeline()) {
item.render(args);
} else {

View file

@ -17,6 +17,12 @@
using namespace render;
void ShapePipeline::prepare(gpu::Batch& batch) {
if (batchSetter) {
batchSetter(*this, batch);
}
}
ShapeKey::Filter::Builder::Builder() {
_mask.set(OWN_PIPELINE);
_mask.set(INVALID);
@ -90,13 +96,13 @@ const ShapePipelinePointer ShapePlumber::pickPipeline(RenderArgs* args, const Ke
PipelinePointer shapePipeline(pipelineIterator->second);
auto& batch = args->_batch;
// Setup the one pipeline (to rule them all)
batch->setPipeline(shapePipeline->pipeline);
// Run the pipeline's BatchSetter on the passed-in batch
if (shapePipeline->batchSetter) {
shapePipeline->batchSetter(*shapePipeline, *batch);
}
// Setup the one pipeline (to rule them all)
batch->setPipeline(shapePipeline->pipeline);
return shapePipeline;
}

View file

@ -28,7 +28,9 @@ public:
SKINNED,
STEREO,
DEPTH_ONLY,
DEPTH_BIAS,
WIREFRAME,
NO_CULL_FACE,
OWN_PIPELINE,
INVALID,
@ -39,7 +41,7 @@ public:
Flags _flags;
ShapeKey() : _flags{0} {}
ShapeKey() : _flags{ 0 } {}
ShapeKey(const Flags& flags) : _flags{flags} {}
class Builder {
@ -57,7 +59,9 @@ public:
Builder& withSkinned() { _flags.set(SKINNED); return (*this); }
Builder& withStereo() { _flags.set(STEREO); return (*this); }
Builder& withDepthOnly() { _flags.set(DEPTH_ONLY); return (*this); }
Builder& withDepthBias() { _flags.set(DEPTH_BIAS); return (*this); }
Builder& withWireframe() { _flags.set(WIREFRAME); return (*this); }
Builder& withoutCullFace() { _flags.set(NO_CULL_FACE); return (*this); }
Builder& withOwnPipeline() { _flags.set(OWN_PIPELINE); return (*this); }
Builder& invalidate() { _flags.set(INVALID); return (*this); }
@ -107,9 +111,15 @@ public:
Builder& withDepthOnly() { _flags.set(DEPTH_ONLY); _mask.set(DEPTH_ONLY); return (*this); }
Builder& withoutDepthOnly() { _flags.reset(DEPTH_ONLY); _mask.set(DEPTH_ONLY); return (*this); }
Builder& withDepthBias() { _flags.set(DEPTH_BIAS); _mask.set(DEPTH_BIAS); return (*this); }
Builder& withoutDepthBias() { _flags.reset(DEPTH_BIAS); _mask.set(DEPTH_BIAS); return (*this); }
Builder& withWireframe() { _flags.set(WIREFRAME); _mask.set(WIREFRAME); return (*this); }
Builder& withoutWireframe() { _flags.reset(WIREFRAME); _mask.set(WIREFRAME); return (*this); }
Builder& withCullFace() { _flags.reset(NO_CULL_FACE); _mask.set(NO_CULL_FACE); return (*this); }
Builder& withoutCullFace() { _flags.set(NO_CULL_FACE); _mask.set(NO_CULL_FACE); return (*this); }
protected:
friend class Filter;
Flags _flags{0};
@ -130,7 +140,9 @@ public:
bool isSkinned() const { return _flags[SKINNED]; }
bool isStereo() const { return _flags[STEREO]; }
bool isDepthOnly() const { return _flags[DEPTH_ONLY]; }
bool isDepthBiased() const { return _flags[DEPTH_BIAS]; }
bool isWireFrame() const { return _flags[WIREFRAME]; }
bool isCullFace() const { return !_flags[NO_CULL_FACE]; }
bool hasOwnPipeline() const { return _flags[OWN_PIPELINE]; }
bool isValid() const { return !_flags[INVALID]; }
@ -150,21 +162,23 @@ public:
};
};
inline QDebug operator<<(QDebug debug, const ShapeKey& renderKey) {
if (renderKey.isValid()) {
if (renderKey.hasOwnPipeline()) {
inline QDebug operator<<(QDebug debug, const ShapeKey& key) {
if (key.isValid()) {
if (key.hasOwnPipeline()) {
debug << "[ShapeKey: OWN_PIPELINE]";
} else {
debug << "[ShapeKey:"
<< "hasLightmap:" << renderKey.hasLightmap()
<< "hasTangents:" << renderKey.hasTangents()
<< "hasSpecular:" << renderKey.hasSpecular()
<< "hasEmissive:" << renderKey.hasEmissive()
<< "isTranslucent:" << renderKey.isTranslucent()
<< "isSkinned:" << renderKey.isSkinned()
<< "isStereo:" << renderKey.isStereo()
<< "isDepthOnly:" << renderKey.isDepthOnly()
<< "isWireFrame:" << renderKey.isWireFrame()
<< "hasLightmap:" << key.hasLightmap()
<< "hasTangents:" << key.hasTangents()
<< "hasSpecular:" << key.hasSpecular()
<< "hasEmissive:" << key.hasEmissive()
<< "isTranslucent:" << key.isTranslucent()
<< "isSkinned:" << key.isSkinned()
<< "isStereo:" << key.isStereo()
<< "isDepthOnly:" << key.isDepthOnly()
<< "isDepthBiased:" << key.isDepthBiased()
<< "isWireFrame:" << key.isWireFrame()
<< "isCullFace:" << key.isCullFace()
<< "]";
}
} else {
@ -209,6 +223,10 @@ public:
ShapePipeline(gpu::PipelinePointer pipeline, LocationsPointer locations, BatchSetter batchSetter) :
pipeline(pipeline), locations(locations), batchSetter(batchSetter) {}
// Normally, a pipeline is accessed through pickPipeline. If it is set manually instead,
// call this method after setPipeline to prepare the pipeline with its default buffers.
void prepare(gpu::Batch& batch);
gpu::PipelinePointer pipeline;
std::shared_ptr<Locations> locations;
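A hedged sketch of the new DEPTH_BIAS and NO_CULL_FACE flags in use, limited to builder and accessor calls visible in this diff; how ShapePlumber applies a Filter is not shown here, so the filter is only constructed. The render namespace qualification is assumed:
using render::ShapeKey;
// Keys carrying the new flags:
ShapeKey biasedKey = ShapeKey::Builder().withDepthBias().withoutCullFace().build();
ShapeKey plainKey = ShapeKey::Builder().build();
// A filter accepting only depth-biased, back-face-culled keys:
ShapeKey::Filter filter = ShapeKey::Filter::Builder().withDepthBias().withCullFace().build();
// Introspection added in this diff:
bool biased = biasedKey.isDepthBiased(); // true
bool culled = plainKey.isCullFace();     // true, since culling is the default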

View file

@ -12,7 +12,12 @@
#ifndef hifi_render_Task_h
#define hifi_render_Task_h
#include <qscriptengine.h> // QObject
#include <QtCore/qobject.h>
#include <QtCore/qjsondocument.h>
#include <QtCore/qjsonobject.h>
#include <QtCore/qjsonvalue.h>
#include <shared/JSONHelpers.h>
#include "Context.h"
@ -65,6 +70,11 @@ public:
bool alwaysEnabled{ true };
bool enabled{ true };
Q_INVOKABLE QString toJSON() { return QJsonDocument(toJsonValue(*this).toObject()).toJson(QJsonDocument::Compact); }
public slots:
void load(const QJsonValue& json) { qObjectFromJsonValue(json, *this); }
};
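A hedged sketch of the JSON hooks added to JobConfig above: toJSON() returns the compact JSON of the config's properties and load() restores them from a QJsonValue. How a JobConfig instance is obtained from a render task, and its exact include path/namespace, are assumed:
#include <QtCore/QJsonDocument>
#include <QtCore/QJsonObject>
#include <QtCore/QJsonValue>
#include <QtCore/QString>
// JobConfig comes from render/Task.h (path and namespace assumed).
QString saveConfig(JobConfig& config) {
    return config.toJSON(); // compact JSON built from toJsonValue(*this)
}
void restoreConfig(JobConfig& config, const QString& snapshot) {
    QJsonDocument doc = QJsonDocument::fromJson(snapshot.toUtf8());
    config.load(QJsonValue(doc.object()));
}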
class TaskConfig : public JobConfig {

View file

@ -12,12 +12,12 @@
#include <QtCore/QJsonObject>
#include <QtCore/QJsonArray>
#include <QtCore/qmetaobject.h>
#include "../RegisteredMetaTypes.h"
template <typename T>
QJsonValue glmToJson(const T& t) {
static const T DEFAULT_VALUE = T();
if (t == DEFAULT_VALUE) {
return QJsonValue();
}
QJsonArray result;
for (auto i = 0; i < t.length(); ++i) {
result.push_back(t[i]);
@ -46,6 +46,10 @@ QJsonValue toJsonValue(const vec3& v) {
return glmToJson(v);
}
QJsonValue toJsonValue(const vec4& v) {
return glmToJson(v);
}
quat quatFromJsonValue(const QJsonValue& q) {
return glmFromJson<quat>(q);
}
@ -57,3 +61,63 @@ vec3 vec3FromJsonValue(const QJsonValue& v) {
return glmFromJson<vec3>(v);
}
vec4 vec4FromJsonValue(const QJsonValue& v) {
if (v.isDouble()) {
return vec4((float)v.toDouble());
}
return glmFromJson<vec4>(v);
}
QJsonValue toJsonValue(const QObject& o) {
QJsonObject json{};
// Add all properties, see http://doc.qt.io/qt-5/qmetaobject.html#propertyCount
const auto& meta = o.metaObject();
for (int i = meta->propertyOffset(); i < meta->propertyCount(); ++i) {
QString name = QString::fromLatin1(meta->property(i).name());
auto type = meta->property(i).userType();
QVariant variant{ meta->property(i).read(&o) };
QJsonValue value;
// User-registered types need explicit conversion
if (type == qMetaTypeId<quat>()) {
value = toJsonValue(variant.value<quat>());
} else if (type == qMetaTypeId<vec3>()) {
value = toJsonValue(variant.value<vec3>());
} else if (type == qMetaTypeId<vec4>()) {
value = toJsonValue(variant.value<vec4>());
} else {
// Qt types are converted automatically
value = QJsonValue::fromVariant(variant);
}
json.insert(name, value);
}
// Add all children (recursively)
const auto children = o.children();
for (const auto& child : children) {
QJsonObject childJson = toJsonValue(*child).toObject();
if (!childJson.empty()) {
json.insert(child->objectName(), childJson);
}
}
return json;
}
void qObjectFromJsonValue(const QJsonValue& j, QObject& o) {
const QJsonObject object = j.toObject();
for (auto it = object.begin(); it != object.end(); it++) {
std::string key = it.key().toStdString();
if (it.value().isObject()) {
QVariant child = o.property(key.c_str());
if (child.isValid()) {
QObject* object = child.value<QObject*>();
qObjectFromJsonValue(it.value(), *object);
}
} else {
o.setProperty(key.c_str(), it.value());
}
}
}
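toJsonValue(const QObject&) and qObjectFromJsonValue above lean on Qt's property reflection. A minimal standalone Qt sketch of the same idea (requires moc because of Q_OBJECT; the DemoConfig type is illustrative only):
#include <QtCore/QObject>
#include <QtCore/QJsonObject>
#include <QtCore/QJsonValue>
#include <QtCore/QMetaProperty>
#include <QtCore/QVariant>
class DemoConfig : public QObject {
    Q_OBJECT
    Q_PROPERTY(bool enabled MEMBER enabled)
    Q_PROPERTY(float radius MEMBER radius)
public:
    bool enabled{ true };
    float radius{ 1.0f };
};
// Serialize every declared property, mirroring the loop in toJsonValue above.
QJsonObject propertiesToJson(const QObject& o) {
    QJsonObject json;
    const QMetaObject* meta = o.metaObject();
    for (int i = meta->propertyOffset(); i < meta->propertyCount(); ++i) {
        QMetaProperty prop = meta->property(i);
        json.insert(QString::fromLatin1(prop.name()), QJsonValue::fromVariant(prop.read(&o)));
    }
    return json;
}
// Restore them with setProperty, as qObjectFromJsonValue does for leaf values.
void propertiesFromJson(const QJsonObject& json, QObject& o) {
    for (auto it = json.begin(); it != json.end(); ++it) {
        o.setProperty(it.key().toLatin1().constData(), it.value().toVariant());
    }
}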

View file

@ -13,11 +13,13 @@
#include "../GLMHelpers.h"
QJsonValue toJsonValue(const quat& q);
QJsonValue toJsonValue(const vec3& q);
QJsonValue toJsonValue(const vec3& v);
QJsonValue toJsonValue(const vec4& v);
QJsonValue toJsonValue(const QObject& o);
quat quatFromJsonValue(const QJsonValue& q);
vec3 vec3FromJsonValue(const QJsonValue& q);
vec3 vec3FromJsonValue(const QJsonValue& v);
vec4 vec4FromJsonValue(const QJsonValue& v);
void qObjectFromJsonValue(const QJsonValue& j, QObject& o);
#endif

View file

@ -239,6 +239,7 @@ public:
}
}
auto pipeline = geometryCache->getSimplePipeline();
for (auto& transform : transforms) {
batch.setModelTransform(transform);
batch.setupNamedCalls(GRID_INSTANCE, [=](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {