Merge branch 'master' of https://github.com/highfidelity/hifi into animationUnification

This commit is contained in:
ZappoMan 2014-11-12 20:39:39 -08:00
commit 4077f6c2ab
66 changed files with 1322 additions and 744 deletions

View file

@ -1,104 +0,0 @@
//
// Test.js
// examples
//
// Created by Ryan Huffman on 5/4/14
// Copyright 2014 High Fidelity, Inc.
//
// This provides very basic unit testing functionality.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
test = function(name, func) {
print("Running test: " + name);
var unitTest = new UnitTest(name, func);
try {
unitTest.run();
print(" Success: " + unitTest.numAssertions + " assertions passed");
} catch (error) {
print(" Failure: " + error.name + " " + error.message);
}
};
AssertionException = function(expected, actual, message) {
print("Creating exception");
this.message = message + "\n: " + actual + " != " + expected;
this.name = 'AssertionException';
};
UnthrownException = function(message) {
print("Creating exception");
this.message = message + "\n";
this.name = 'UnthrownException';
};
UnitTest = function(name, func) {
this.numAssertions = 0;
this.func = func;
};
UnitTest.prototype.run = function() {
this.func();
};
UnitTest.prototype.assertNotEquals = function(expected, actual, message) {
this.numAssertions++;
if (expected == actual) {
throw new AssertionException(expected, actual, message);
}
};
UnitTest.prototype.assertEquals = function(expected, actual, message) {
this.numAssertions++;
if (expected != actual) {
throw new AssertionException(expected, actual, message);
}
};
UnitTest.prototype.assertContains = function (expected, actual, message) {
this.numAssertions++;
if (actual.indexOf(expected) == -1) {
throw new AssertionException(expected, actual, message);
}
};
UnitTest.prototype.assertHasProperty = function(property, actual, message) {
this.numAssertions++;
if (actual[property] === undefined) {
throw new AssertionException(property, actual, message);
}
};
UnitTest.prototype.assertNull = function(value, message) {
this.numAssertions++;
if (value !== null) {
throw new AssertionException(value, null, message);
}
}
UnitTest.prototype.arrayEqual = function(array1, array2, message) {
this.numAssertions++;
if (array1.length !== array2.length) {
throw new AssertionException(array1.length , array2.length , message);
}
for (var i = 0; i < array1.length; ++i) {
if (array1[i] !== array2[i]) {
throw new AssertionException(array1[i], array2[i], i + " " + message);
}
}
}
UnitTest.prototype.raises = function(func, message) {
this.numAssertions++;
try {
func();
} catch (error) {
return;
}
throw new UnthrownException(message);
}

View file

@ -132,15 +132,16 @@ function checkHands(deltaTime) {
}
function playChord(position, volume) {
var options = new AudioInjectionOptions();
options.position = position;
options.volume = volume;
if (Audio.isInjectorPlaying(soundPlaying)) {
print("stopped sound");
Audio.stopInjector(soundPlaying);
}
print("Played sound: " + whichChord + " at volume " + options.volume);
soundPlaying = Audio.playSound(chords[guitarSelector + whichChord], options);
soundPlaying = Audio.playSound(chords[guitarSelector + whichChord], {
position: position,
volume: volume
});
}
function keyPressEvent(event) {
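Note: the change above is the pattern repeated across most of the script diffs in this commit — the AudioInjectionOptions object is dropped and a plain options object is passed straight to Audio.playSound(), which returns an injector handle. A minimal sketch of the new calling convention, using only calls that appear in these diffs (playChordAt and its parameters are illustrative names, not part of the commit):

var soundPlaying = null;

function playChordAt(sound, position, volume) {
    // Stop the previous injector if it is still audible, as playChord() does above.
    if (soundPlaying !== null && Audio.isInjectorPlaying(soundPlaying)) {
        Audio.stopInjector(soundPlaying);
    }
    // Options are now a plain object instead of an AudioInjectionOptions instance.
    soundPlaying = Audio.playSound(sound, {
        position: position,
        volume: volume
    });
}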

View file

@ -32,10 +32,10 @@ function updateEntity(deltaTime) {
if (Math.random() < CHANCE_OF_PLAYING_SOUND) {
// play a sound at the location of the entity
var options = new AudioInjectionOptions();
options.position = entityPosition;
options.volume = 0.75;
Audio.playSound(sound, options);
Audio.playSound(sound, {
position: entityPosition,
volume: 0.75
});
}
var audioAverageLoudness = MyAvatar.audioAverageLoudness * FACTOR;

View file

@ -17,9 +17,11 @@ var SOUND_TRIGGER_CLEAR = 1000; // milliseconds
var SOUND_TRIGGER_DELAY = 200; // milliseconds
var soundExpiry = 0;
var DateObj = new Date();
var audioOptions = new AudioInjectionOptions();
audioOptions.volume = 0.5;
audioOptions.position = { x: 0, y: 0, z: 0 };
var audioOptions = {
volume: 0.5,
position: { x: 0, y: 0, z: 0 }
}
var hitSounds = new Array();
hitSounds[0] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit1.raw");

View file

@ -33,20 +33,22 @@ function maybePlaySound(deltaTime) {
// Set the location and other info for the sound to play
var whichBird = Math.floor(Math.random() * birds.length);
//print("playing sound # " + whichBird);
var options = new AudioInjectionOptions();
var position = { x: lowerCorner.x + Math.random() * (upperCorner.x - lowerCorner.x),
y: lowerCorner.y + Math.random() * (upperCorner.y - lowerCorner.y),
z: lowerCorner.z + Math.random() * (upperCorner.z - lowerCorner.z) };
options.position = position;
options.volume = BIRD_MASTER_VOLUME;
//
var position = {
x: lowerCorner.x + Math.random() * (upperCorner.x - lowerCorner.x),
y: lowerCorner.y + Math.random() * (upperCorner.y - lowerCorner.y),
z: lowerCorner.z + Math.random() * (upperCorner.z - lowerCorner.z)
};
var options = {
position: position,
volume: BIRD_MASTER_VOLUME
};
var entityId = Entities.addEntity({
type: "Sphere",
position: position,
dimensions: { x: BIRD_SIZE, y: BIRD_SIZE, z: BIRD_SIZE },
color: birds[whichBird].color,
lifetime: 10
});
type: "Sphere",
position: position,
dimensions: { x: BIRD_SIZE, y: BIRD_SIZE, z: BIRD_SIZE },
color: birds[whichBird].color,
lifetime: 10
});
if (useLights) {
var lightId = Entities.addEntity({

View file

@ -172,13 +172,11 @@ function playRandomSound() {
}
function playRandomFootstepSound() {
var whichSound = Math.floor((Math.random() * footstepSounds.length));
var options = new AudioInjectionOptions();
options.position = Avatar.position;
options.volume = 1.0;
Audio.playSound(footstepSounds[whichSound], options);
var whichSound = Math.floor((Math.random() * footstepSounds.length));
Audio.playSound(footstepSounds[whichSound], {
position: Avatar.position,
volume: 1.0
});
}
// Facial Animation

View file

@ -134,13 +134,11 @@ function playRandomSound() {
}
function playRandomFootstepSound() {
var whichSound = Math.floor((Math.random() * footstepSounds.length));
var options = new AudioInjectionOptions();
options.position = Avatar.position;
options.volume = 1.0;
Audio.playSound(footstepSounds[whichSound], options);
var whichSound = Math.floor((Math.random() * footstepSounds.length));
Audio.playSound(footstepSounds[whichSound], {
position: Avatar.position,
volume: 1.0
});
}
// ************************************ Facial Animation **********************************

View file

@ -89,11 +89,11 @@ function maybePlaySound(deltaTime) {
}
function playClap(volume, position) {
var options = new AudioInjectionOptions();
options.position = position;
options.volume = 1.0;
var clip = Math.floor(Math.random() * numberOfSounds);
Audio.playSound(claps[clip], options);
Audio.playSound(claps[clip], {
position: position,
volume: volume
});
}
var FASTEST_CLAP_INTERVAL = 150.0;

View file

@ -63,8 +63,11 @@ function checkSticks(deltaTime) {
// Waiting for change in velocity direction or slowing to trigger drum sound
if ((palmVelocity.y > 0.0) || (speed < STOP_SPEED)) {
state[palm] = 0;
var options = new AudioInjectionOptions();
options.position = Controller.getSpatialControlPosition(palm * 2 + 1);
var options = {
position: Controller.getSpatialControlPosition(palm * 2 + 1)
}
if (strokeSpeed[palm] > 1.0) { strokeSpeed[palm] = 1.0; }
options.volume = strokeSpeed[palm];

View file

@ -68,9 +68,11 @@ var numColors = 9;
var whichColor = 0; // Starting color is 'Copy' mode
// Create sounds for for every script actions that require one
var audioOptions = new AudioInjectionOptions();
audioOptions.volume = 1.0;
audioOptions.position = Vec3.sum(MyAvatar.position, { x: 0, y: 1, z: 0 } ); // start with audio slightly above the avatar
// start with audio slightly above the avatar
var audioOptions = {
position: Vec3.sum(MyAvatar.position, { x: 0, y: 1, z: 0 } ),
volume: 1.0
};
function SoundArray() {
this.audioOptions = audioOptions

View file

@ -135,10 +135,10 @@ function updateBirds(deltaTime) {
// Tweeting behavior
if (birds[i].tweeting == 0) {
if (Math.random() < CHANCE_OF_TWEETING) {
var options = new AudioInjectionOptions();
options.position = properties.position;
options.volume = 0.75;
Audio.playSound(birds[i].tweetSound, options);
Audio.playSound(birds[i].tweetSound, {
position: properties.position,
volume: 0.75
});
birds[i].tweeting = 10;
}
} else {

View file

@ -14,14 +14,6 @@
(function(){
var bird;
function playSound(entityID) {
var options = new AudioInjectionOptions();
var position = MyAvatar.position;
options.position = position;
options.volume = 0.5;
Audio.playSound(bird, options);
};
this.preload = function(entityID) {
print("preload("+entityID.id+")");
bird = new Sound("http://s3.amazonaws.com/hifi-public/sounds/Animals/bushtit_1.raw");
@ -29,6 +21,9 @@
this.clickDownOnEntity = function(entityID, mouseEvent) {
print("clickDownOnEntity()...");
playSound();
Audio.playSound(bird, {
position: MyAvatar.position,
volume: 0.5
});
};
})

View file

@ -14,13 +14,12 @@
(function(){
var bird;
function playSound() {
var options = new AudioInjectionOptions();
var position = MyAvatar.position;
options.position = position;
options.volume = 0.5;
Audio.playSound(bird, options);
};
function playSound(entityID) {
Audio.playSound(bird, {
position: MyAvatar.position,
volume: 0.5
});
}
this.preload = function(entityID) {
print("preload("+entityID.id+")");
@ -31,7 +30,7 @@
playSound();
};
this.leaveEntity = function(entityID) {
this.leaveEntity = function(entityID) {
playSound();
};
})
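Note: the two bird entity scripts above show the same migration inside an entity script's event handlers. A minimal sketch of that shape, assuming only the API already used in these diffs (the sound URL and volume are taken from the hunks above; the handler body is illustrative):

(function() {
    var bird;

    this.preload = function(entityID) {
        // Load the clip once, when the entity script is first attached.
        bird = new Sound("http://s3.amazonaws.com/hifi-public/sounds/Animals/bushtit_1.raw");
    };

    this.clickDownOnEntity = function(entityID, mouseEvent) {
        // Play inline with a plain options object, as in the updated handlers above.
        Audio.playSound(bird, {
            position: MyAvatar.position,
            volume: 0.5
        });
    };
})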

View file

@ -177,10 +177,10 @@ function playSound(sound, position) {
if (!SOUNDS_ENABLED) {
return;
}
var options = new AudioInjectionOptions();
options.position = position;
options.volume = 1.0;
Audio.playSound(sound, options);
Audio.playSound(sound, {
position: position
});
}
function cleanupFrisbees() {

View file

@ -44,8 +44,9 @@ var targetLaunchSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/
var gunModel = "http://public.highfidelity.io/models/attachments/HaloGun.fst";
var audioOptions = new AudioInjectionOptions();
audioOptions.volume = 0.9;
var audioOptions = {
volume: 0.9
}
var shotsFired = 0;

View file

@ -43,8 +43,9 @@ var targetLaunchSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/
var gunModel = "http://public.highfidelity.io/models/attachments/HaloGun.fst";
var audioOptions = new AudioInjectionOptions();
audioOptions.volume = 0.9;
var audioOptions = {
volume: 0.9
}
var shotsFired = 0;

View file

@ -72,15 +72,11 @@ var WATCH_AVATAR_DISTANCE = 2.5;
var sound = new Sound("http://public.highfidelity.io/sounds/Footsteps/FootstepW2Right-12db.wav");
function playSound() {
var options = new AudioInjectionOptions();
var position = MyAvatar.position;
options.position = position;
options.volume = 1.0;
Audio.playSound(sound, options);
Audio.playSound(sound, {
position: MyAvatar.position
});
}
function pullBack() {
saveCameraState();
cameraPosition = Vec3.subtract(MyAvatar.position, Vec3.multiplyQbyV(Camera.getOrientation(), { x: 0, y: -hipsToEyes, z: -hipsToEyes * WATCH_AVATAR_DISTANCE }));

View file

@ -19,11 +19,9 @@ var soundPlaying = false;
function update(deltaTime) {
if (!Audio.isInjectorPlaying(soundPlaying)) {
var options = new AudioInjectionOptions();
options.position = { x:0, y:0, z:0 };
options.volume = 1.0;
options.loop = true;
soundPlaying = Audio.playSound(sound, options);
soundPlaying = Audio.playSound(sound, {
loop: true
});
print("Started sound loop");
}
}

View file

@ -37,7 +37,14 @@ var panelsCenterShift = Vec3.subtract(panelsCenter, orbCenter);
var ORB_SHIFT = { x: 0, y: -1.4, z: -0.8};
var HELMET_ATTACHMENT_URL = "https://hifi-public.s3.amazonaws.com/models/attachments/IronManMaskOnly.fbx"
var HELMET_ATTACHMENT_URL = HIFI_PUBLIC_BUCKET + "models/attachments/IronManMaskOnly.fbx"
var droneSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Lobby/drone.raw")
var currentDrone = null;
var latinSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Lobby/latin.raw")
var elevatorSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Lobby/elevator.raw")
var currentMusak = null;
function reticlePosition() {
var RETICLE_DISTANCE = 1;
@ -87,6 +94,12 @@ function drawLobby() {
// add an attachment on this avatar so other people see them in the lobby
MyAvatar.attach(HELMET_ATTACHMENT_URL, "Neck", {x: 0, y: 0, z: 0}, Quat.fromPitchYawRollDegrees(0, 0, 0), 1.15);
// start the drone sound
currentDrone = Audio.playSound(droneSound, { stereo: true, loop: true, localOnly: true });
// start one of our musak sounds
playRandomMusak();
}
}
@ -112,11 +125,35 @@ function changeLobbyTextures() {
Overlays.editOverlay(panelWall, textureProp);
}
function playRandomMusak() {
chosenSound = null;
if (latinSound.downloaded && elevatorSound.downloaded) {
chosenSound = Math.random() < 0.5 ? latinSound : elevatorSound;
} else if (latinSound.downloaded) {
chosenSound = latinSound;
} else if (elevatorSound.downloaded) {
chosenSound = elevatorSound;
}
if (chosenSound) {
currentMusak = Audio.playSound(chosenSound, { stereo: true, localOnly: true })
} else {
currentMusak = null;
}
}
function cleanupLobby() {
Overlays.deleteOverlay(panelWall);
Overlays.deleteOverlay(orbShell);
Overlays.deleteOverlay(reticle);
Audio.stopInjector(currentDrone);
currentDrone = null;
Audio.stopInjector(currentMusak);
currentMusak = null;
panelWall = false;
orbShell = false;
reticle = false;
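Note: lobby.js now keeps handles to its local injectors and stops them in cleanupLobby(). A hedged sketch of that lifecycle, using only the options shown in the hunk above (startAmbience and stopAmbience are illustrative names):

var droneSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Lobby/drone.raw");
var currentDrone = null;

function startAmbience() {
    // stereo + loop + localOnly is the combination lobby.js now uses; localOnly playback
    // is what the new Audio::outputLocalInjector() path on the C++ side services.
    currentDrone = Audio.playSound(droneSound, { stereo: true, loop: true, localOnly: true });
}

function stopAmbience() {
    if (currentDrone !== null) {
        Audio.stopInjector(currentDrone);
        currentDrone = null;
    }
}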

View file

@ -68,7 +68,8 @@ var text = Overlays.addOverlay("text", {
color: { red: 255, green: 0, blue: 0},
topMargin: 4,
leftMargin: 4,
text: "Here is some text.\nAnd a second line."
text: "Here is some text.\nAnd a second line.",
alpha: 0.7
});
// This will create an image overlay, which starts out as invisible

View file

@ -15,12 +15,11 @@ var bird = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Animals/bushtit_1.raw");
function maybePlaySound(deltaTime) {
if (Math.random() < 0.01) {
// Set the location and other info for the sound to play
var options = new AudioInjectionOptions();
var position = MyAvatar.position;
options.position = position;
options.volume = 0.5;
Audio.playSound(bird, options);
// Set the location and other info for the sound to play
Audio.playSound(bird, {
position: MyAvatar.position,
volume: 0.5
});
}
}

View file

@ -20,10 +20,12 @@ var sound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Nylon+A.raw"
//var sound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Cocktail+Party+Snippets/Bandcamp.wav");
var soundPlaying = false;
var options = new AudioInjectionOptions();
options.position = Vec3.sum(Camera.getPosition(), Quat.getFront(MyAvatar.orientation));
options.volume = 0.5;
options.loop = true;
var options = {
position: Vec3.sum(Camera.getPosition(), Quat.getFront(MyAvatar.orientation)),
volume: 0.5,
loop: true
}
var playing = false;
var ball = false;

View file

@ -19,24 +19,23 @@ var distance = 1;
var debug = 0;
function playSound() {
var options = new AudioInjectionOptions();
currentTime += deltaTime;
currentTime += deltaTime;
var s = distance * Math.sin(currentTime);
var c = distance * Math.cos(currentTime);
var soundOffset = { x:s, y:0, z:c };
var soundOffset = { x:s, y:0, z:c };
if (debug) {
print("t=" + currentTime + "offset=" + soundOffset.x + "," + soundOffset.y + "," + soundOffset.z);
}
if (debug) {
print("t=" + currentTime + "offset=" + soundOffset.x + "," + soundOffset.y + "," + soundOffset.z);
}
var avatarPosition = MyAvatar.position;
var soundPosition = Vec3.sum(avatarPosition,soundOffset);
var avatarPosition = MyAvatar.position;
var soundPosition = Vec3.sum(avatarPosition,soundOffset);
options.position = soundPosition
options.volume = 1.0;
Audio.playSound(soundClip, options);
Audio.playSound(soundClip, {
position: soundPosition
});
}
Script.setInterval(playSound, 250);

View file

@ -14,11 +14,10 @@ Script.include("libraries/globals.js");
var soundClip = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Cocktail%20Party%20Snippets/Walken1.wav");
function playSound() {
var options = new AudioInjectionOptions();
var position = MyAvatar.position;
options.position = position;
options.volume = 0.5;
Audio.playSound(soundClip, options);
Audio.playSound(soundClip, {
position: MyAvatar.position,
volume: 0.5
});
}
Script.setInterval(playSound, 10000);

View file

@ -15,10 +15,12 @@ var modelURL = HIFI_PUBLIC_BUCKET + "models/entities/radio/Speakers.fbx";
var soundURL = HIFI_PUBLIC_BUCKET + "sounds/FamilyStereo.raw";
var AudioRotationOffset = Quat.fromPitchYawRollDegrees(0, -90, 0);
var audioOptions = new AudioInjectionOptions();
audioOptions.volume = 0.5;
audioOptions.loop = true;
audioOptions.isStereo = true;
var audioOptions = {
volume: 0.5,
loop: true,
stereo: true
}
var injector = null;
var sound = new Sound(soundURL, audioOptions.isStereo);

View file

@ -217,7 +217,8 @@ function update(deltaTime) {
if (invaderStepOfCycle % stepsPerSound == 0) {
// play the move sound
var options = new AudioInjectionOptions();
var options = {};
if (soundInMyHead) {
options.position = { x: MyAvatar.position.x + 0.0,
y: MyAvatar.position.y + 0.1,
@ -225,7 +226,7 @@ function update(deltaTime) {
} else {
options.position = getInvaderPosition(invadersPerRow / 2, numberOfRows / 2);
}
options.volume = 1.0;
Audio.playSound(moveSounds[currentMoveSound], options);
// get ready for next move sound
@ -330,7 +331,7 @@ function fireMissile() {
lifetime: 5
});
var options = new AudioInjectionOptions();
var options = {}
if (soundInMyHead) {
options.position = { x: MyAvatar.position.x + 0.0,
y: MyAvatar.position.y + 0.1,
@ -338,7 +339,7 @@ function fireMissile() {
} else {
options.position = missilePosition;
}
options.volume = 1.0;
Audio.playSound(shootSound, options);
missileFired = true;
@ -380,7 +381,7 @@ function deleteIfInvader(possibleInvaderEntity) {
Entities.deleteEntity(myMissile);
// play the hit sound
var options = new AudioInjectionOptions();
var options = {};
if (soundInMyHead) {
options.position = { x: MyAvatar.position.x + 0.0,
y: MyAvatar.position.y + 0.1,
@ -388,7 +389,7 @@ function deleteIfInvader(possibleInvaderEntity) {
} else {
options.position = getInvaderPosition(row, column);
}
options.volume = 1.0;
Audio.playSound(hitSound, options);
}
}
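Note: the three hunks above all build the options object conditionally on soundInMyHead. A short illustrative helper that captures that pattern (playInvaderSound is a hypothetical name, not part of the script):

function playInvaderSound(sound, worldPosition) {
    var options = {};
    if (soundInMyHead) {
        // Place the sound just above the avatar's head instead of at the game position.
        options.position = {
            x: MyAvatar.position.x,
            y: MyAvatar.position.y + 0.1,
            z: MyAvatar.position.z
        };
    } else {
        options.position = worldPosition;
    }
    // volume is left unset, matching the updated hunks above.
    Audio.playSound(sound, options);
}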

View file

@ -113,10 +113,7 @@ function checkControllerSide(whichSide) {
inHand: true };
Entities.editEntity(closestEntity, properties);
var options = new AudioInjectionOptions();
options.position = ballPosition;
options.volume = 1.0;
Audio.playSound(catchSound, options);
Audio.playSound(catchSound, { position: ballPosition });
return; // exit early
}
@ -156,10 +153,7 @@ function checkControllerSide(whichSide) {
}
// Play a new ball sound
var options = new AudioInjectionOptions();
options.position = ballPosition;
options.volume = 1.0;
Audio.playSound(newSound, options);
Audio.playSound(newSound, { position: ballPosition});
return; // exit early
}
@ -207,10 +201,7 @@ function checkControllerSide(whichSide) {
rightHandEntity = false;
}
var options = new AudioInjectionOptions();
options.position = ballPosition;
options.volume = 1.0;
Audio.playSound(throwSound, options);
Audio.playSound(throwSound, { position: ballPosition });
}
}
}

View file

@ -88,7 +88,6 @@ Script.update.connect(function(deltaTime) {
animateAvatar(deltaTime, speed);
break;
}
case state.EDIT_STANDING: {
motion.curAnim = motion.selStand;
motion.direction = FORWARDS;
@ -559,9 +558,11 @@ function getLeanRoll(deltaTime, speed) {
function playFootstep(side) {
var options = new AudioInjectionOptions();
options.position = Camera.getPosition();
options.volume = 0.3;
var options = {
position: Camera.getPosition(),
volume: 0.3
};
var soundNumber = 2; // 0 to 2
if (side === RIGHT && motion.makesFootStepSounds) {
Audio.playSound(walkAssets.footsteps[soundNumber + 1], options);
@ -2609,4 +2610,4 @@ function animateAvatar(deltaTime, speed) {
}
// Begin by setting an internal state
state.setInternalState(state.STANDING);
state.setInternalState(state.STANDING);

View file

@ -109,6 +109,7 @@ static unsigned STARFIELD_SEED = 1;
static const int BANDWIDTH_METER_CLICK_MAX_DRAG_LENGTH = 6; // farther dragged clicks are ignored
const qint64 MAXIMUM_CACHE_SIZE = 10737418240; // 10GB
static QTimer* idleTimer = NULL;
@ -159,6 +160,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
_lastQueriedViewFrustum(),
_lastQueriedTime(usecTimestampNow()),
_mirrorViewRect(QRect(MIRROR_VIEW_LEFT_PADDING, MIRROR_VIEW_TOP_PADDING, MIRROR_VIEW_WIDTH, MIRROR_VIEW_HEIGHT)),
_viewTransform(new gpu::Transform()),
_scaleMirror(1.0f),
_rotateMirror(0.0f),
_raiseMirror(0.0f),
@ -418,7 +420,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
_trayIcon->show();
// set the local loopback interface for local sounds from audio scripts
AudioScriptingInterface::getInstance().setLocalLoopbackInterface(&_audio);
AudioScriptingInterface::getInstance().setLocalAudioInterface(&_audio);
#ifdef HAVE_RTMIDI
// setup the MIDIManager
@ -454,23 +456,22 @@ Application::~Application() {
// ask the datagram processing thread to quit and wait until it is done
_nodeThread->quit();
_nodeThread->wait();
// kill any audio injectors that are still around
AudioScriptingInterface::getInstance().stopAllInjectors();
// stop the audio process
QMetaObject::invokeMethod(&_audio, "stop");
// ask the audio thread to quit and wait until it is done
_audio.thread()->quit();
_audio.thread()->wait();
// kill any audio injectors that are still around
AudioScriptingInterface::getInstance().stopAllInjectors();
_octreeProcessor.terminate();
_voxelHideShowThread.terminate();
_voxelEditSender.terminate();
_entityEditSender.terminate();
VoxelTreeElement::removeDeleteHook(&_voxels); // we don't need to do this processing on shutdown
Menu::getInstance()->deleteLater();
@ -837,12 +838,14 @@ void Application::controlledBroadcastToNodes(const QByteArray& packet, const Nod
}
bool Application::event(QEvent* event) {
// handle custom URL
if (event->type() == QEvent::FileOpen) {
QFileOpenEvent* fileEvent = static_cast<QFileOpenEvent*>(event);
if (fileEvent->url().isValid()) {
openUrl(fileEvent->url());
if (!fileEvent->url().isEmpty()) {
AddressManager::getInstance().handleLookupString(fileEvent->url().toLocalFile());
}
return false;
@ -1533,11 +1536,6 @@ void Application::idle() {
_idleLoopStdev.reset();
}
if (Menu::getInstance()->isOptionChecked(MenuOption::BuckyBalls)) {
PerformanceTimer perfTimer("buckyBalls");
_buckyBalls.simulate(timeSinceLastUpdate / 1000.f, Application::getInstance()->getAvatar()->getHandData());
}
// After finishing all of the above work, restart the idle timer, allowing 2ms to process events.
idleTimer->start(2);
@ -2801,6 +2799,8 @@ void Application::updateShadowMap() {
// store view matrix without translation, which we'll use for precision-sensitive objects
updateUntranslatedViewMatrix();
// TODO: assign an equivalent viewTransform object to the application to match the current path which uses glMatrixStack
// setViewTransform(viewTransform);
glEnable(GL_POLYGON_OFFSET_FILL);
glPolygonOffset(1.1f, 4.0f); // magic numbers courtesy http://www.eecs.berkeley.edu/~ravir/6160/papers/shadowmaps.ppt
@ -2908,6 +2908,19 @@ void Application::displaySide(Camera& whichCamera, bool selfAvatarOnly) {
// store view matrix without translation, which we'll use for precision-sensitive objects
updateUntranslatedViewMatrix(-whichCamera.getPosition());
// Equivalent to what is happening with _untranslatedViewMatrix and the _viewMatrixTranslation:
// the viewTransform object is updated with the correct values and saved,
// and this is what is used for rendering the Entities and avatars
gpu::Transform viewTransform;
viewTransform.setTranslation(whichCamera.getPosition());
viewTransform.setRotation(rotation);
viewTransform.postTranslate(eyeOffsetPos);
viewTransform.postRotate(eyeOffsetOrient);
if (whichCamera.getMode() == CAMERA_MODE_MIRROR) {
viewTransform.setScale(gpu::Transform::Vec3(-1.0f, 1.0f, 1.0f));
}
setViewTransform(viewTransform);
glTranslatef(_viewMatrixTranslation.x, _viewMatrixTranslation.y, _viewMatrixTranslation.z);
// Setup 3D lights (after the camera transform, so that they are positioned in world space)
@ -2986,13 +2999,6 @@ void Application::displaySide(Camera& whichCamera, bool selfAvatarOnly) {
_audioReflector.render();
}
if (Menu::getInstance()->isOptionChecked(MenuOption::BuckyBalls)) {
PerformanceTimer perfTimer("buckyBalls");
PerformanceWarning warn(Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings),
"Application::displaySide() ... bucky balls...");
_buckyBalls.render();
}
// Draw voxels
if (Menu::getInstance()->isOptionChecked(MenuOption::Voxels)) {
PerformanceTimer perfTimer("voxels");
@ -3032,13 +3038,16 @@ void Application::displaySide(Camera& whichCamera, bool selfAvatarOnly) {
}
}
bool mirrorMode = (whichCamera.getMode() == CAMERA_MODE_MIRROR);
{
PerformanceTimer perfTimer("avatars");
_avatarManager.renderAvatars(mirrorMode ? Avatar::MIRROR_RENDER_MODE : Avatar::NORMAL_RENDER_MODE,
false, selfAvatarOnly);
}
{
PROFILE_RANGE("DeferredLighting");
PerformanceTimer perfTimer("lighting");
@ -3097,7 +3106,7 @@ void Application::displaySide(Camera& whichCamera, bool selfAvatarOnly) {
emit renderingInWorldInterface();
}
}
if (Menu::getInstance()->isOptionChecked(MenuOption::Wireframe)) {
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
}
@ -3108,6 +3117,10 @@ void Application::updateUntranslatedViewMatrix(const glm::vec3& viewMatrixTransl
_viewMatrixTranslation = viewMatrixTranslation;
}
void Application::setViewTransform(const gpu::Transform& view) {
(*_viewTransform) = view;
}
void Application::loadTranslatedViewMatrix(const glm::vec3& translation) {
glLoadMatrixf((const GLfloat*)&_untranslatedViewMatrix);
glTranslatef(translation.x + _viewMatrixTranslation.x, translation.y + _viewMatrixTranslation.y,

View file

@ -43,7 +43,6 @@
#include "MainWindow.h"
#include "Audio.h"
#include "AudioReflector.h"
#include "BuckyBalls.h"
#include "Camera.h"
#include "DatagramProcessor.h"
#include "Environment.h"
@ -233,6 +232,9 @@ public:
const glm::vec3& getViewMatrixTranslation() const { return _viewMatrixTranslation; }
void setViewMatrixTranslation(const glm::vec3& translation) { _viewMatrixTranslation = translation; }
const gpu::TransformPointer& getViewTransform() const { return _viewTransform; }
void setViewTransform(const gpu::Transform& view);
/// if you need to access the application settings, use lockSettings()/unlockSettings()
QSettings* lockSettings() { _settingsMutex.lock(); return _settings; }
void unlockSettings() { _settingsMutex.unlock(); }
@ -481,8 +483,6 @@ private:
bool _justStarted;
Stars _stars;
BuckyBalls _buckyBalls;
VoxelSystem _voxels;
VoxelTree _clipboard; // if I copy/paste
VoxelImportDialog* _voxelImportDialog;
@ -526,6 +526,7 @@ private:
QRect _mirrorViewRect;
RearMirrorTools* _rearMirrorTools;
gpu::TransformPointer _viewTransform;
glm::mat4 _untranslatedViewMatrix;
glm::vec3 _viewMatrixTranslation;
glm::mat4 _projectionMatrix;

View file

@ -32,19 +32,21 @@
#include <QtMultimedia/QAudioOutput>
#include <QSvgRenderer>
#include <glm/glm.hpp>
#include <AudioInjector.h>
#include <NodeList.h>
#include <PacketHeaders.h>
#include <SharedUtil.h>
#include <StDev.h>
#include <UUID.h>
#include <glm/glm.hpp>
#include "Audio.h"
#include "Menu.h"
#include "Util.h"
#include "PositionalAudioStream.h"
#include "Audio.h"
static const float AUDIO_CALLBACK_MSECS = (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float)SAMPLE_RATE * 1000.0;
static const int NUMBER_OF_NOISE_SAMPLE_FRAMES = 300;
@ -1366,24 +1368,34 @@ void Audio::startDrumSound(float volume, float frequency, float duration, float
_drumSoundSample = 0;
}
void Audio::handleAudioByteArray(const QByteArray& audioByteArray, const AudioInjectorOptions& injectorOptions) {
if (audioByteArray.size() > 0) {
QAudioFormat localFormat = _outputFormat;
bool Audio::outputLocalInjector(bool isStereo, qreal volume, AudioInjector* injector) {
if (injector->getLocalBuffer()) {
QAudioFormat localFormat = _desiredOutputFormat;
localFormat.setChannelCount(isStereo ? 2 : 1);
if (!injectorOptions.isStereo()) {
localFormat.setChannelCount(1);
}
QAudioOutput* localOutput = new QAudioOutput(getNamedAudioDeviceForMode(QAudio::AudioOutput, _outputAudioDeviceName),
localFormat, this);
localOutput->setVolume(volume);
QAudioOutput* localSoundOutput = new QAudioOutput(getNamedAudioDeviceForMode(QAudio::AudioOutput, _outputAudioDeviceName), localFormat, this);
// add this to our list of local injected outputs, we will need to clean it up when the injector says it is done
_injectedOutputInterfaces.insert(injector, localOutput);
QIODevice* localIODevice = localSoundOutput->start();
if (localIODevice) {
localIODevice->write(audioByteArray);
} else {
qDebug() << "Unable to handle audio byte array. Error:" << localSoundOutput->error();
}
} else {
qDebug() << "Audio::handleAudioByteArray called with an empty byte array. Sound is likely still downloading.";
connect(injector, &AudioInjector::finished, this, &Audio::cleanupLocalOutputInterface);
localOutput->start(injector->getLocalBuffer());
return localOutput->state() == QAudio::ActiveState;
}
return false;
}
void Audio::cleanupLocalOutputInterface() {
QAudioOutput* outputInterface = _injectedOutputInterfaces.value(sender());
if (outputInterface) {
qDebug() << "Stopping a QAudioOutput interface since injector" << sender() << "is finished";
outputInterface->stop();
outputInterface->deleteLater();
}
}

View file

@ -155,7 +155,7 @@ public slots:
void selectAudioFilterBassCut();
void selectAudioFilterSmiley();
virtual void handleAudioByteArray(const QByteArray& audioByteArray, const AudioInjectorOptions& options);
virtual bool outputLocalInjector(bool isStereo, qreal volume, AudioInjector* injector);
void sendDownstreamAudioStatsPacket();
@ -180,11 +180,11 @@ signals:
void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
private slots:
void cleanupLocalOutputInterface();
private:
void outputFormatChanged();
private:
QByteArray firstInputFrame;
QAudioInput* _audioInput;
QAudioFormat _desiredInputFormat;
@ -256,10 +256,6 @@ private:
float _iconColor;
qint64 _iconPulseTimeReference;
/// Audio callback in class context.
inline void performIO(int16_t* inputLeft, int16_t* outputLeft, int16_t* outputRight);
bool _processSpatialAudio; /// Process received audio by spatial audio hooks
unsigned int _spatialAudioStart; /// Start of spatial audio interval (in sample rate time base)
unsigned int _spatialAudioFinish; /// End of spatial audio interval (in sample rate time base)
@ -372,6 +368,8 @@ private:
AudioOutputIODevice _audioOutputIODevice;
WeakRecorderPointer _recorder;
QHash<QObject*, QAudioOutput*> _injectedOutputInterfaces;
};

View file

@ -1,179 +0,0 @@
//
// BuckyBalls.cpp
// interface/src
//
// Created by Philip on 1/2/14.
// Copyright 2013 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "BuckyBalls.h"
#include "Application.h"
#include "Util.h"
#include "world.h"
#include "devices/SixenseManager.h"
const int NUM_ELEMENTS = 3;
const float RANGE_BBALLS = 0.5f;
const float SIZE_BBALLS = 0.02f;
const float CORNER_BBALLS = 2.f;
const float GRAVITY_BBALLS = -0.25f;
const float BBALLS_ATTRACTION_DISTANCE = SIZE_BBALLS / 2.f;
const float COLLISION_RADIUS = 0.01f;
const float INITIAL_VELOCITY = 0.3f;
glm::vec3 colors[NUM_ELEMENTS];
// Make some bucky balls for the avatar
BuckyBalls::BuckyBalls() {
_bballIsGrabbed[0] = 0;
_bballIsGrabbed[1] = 0;
colors[0] = glm::vec3(0.13f, 0.55f, 0.13f);
colors[1] = glm::vec3(0.64f, 0.16f, 0.16f);
colors[2] = glm::vec3(0.31f, 0.58f, 0.80f);
for (int i = 0; i < NUM_BBALLS; i++) {
_bballPosition[i] = CORNER_BBALLS + randVector() * RANGE_BBALLS;
int element = (rand() % NUM_ELEMENTS);
if (element == 0) {
_bballRadius[i] = SIZE_BBALLS;
_bballColor[i] = colors[0];
} else if (element == 1) {
_bballRadius[i] = SIZE_BBALLS / 2.f;
_bballColor[i] = colors[1];
} else {
_bballRadius[i] = SIZE_BBALLS * 2.f;
_bballColor[i] = colors[2];
}
_bballColliding[i] = 0.f;
_bballElement[i] = element;
if (_bballElement[i] != 1) {
_bballVelocity[i] = randVector() * INITIAL_VELOCITY;
} else {
_bballVelocity[i] = glm::vec3(0);
}
}
}
void BuckyBalls::grab(PalmData& palm, float deltaTime) {
float penetration;
glm::vec3 fingerTipPosition = palm.getTipPosition();
if (palm.getControllerButtons() & BUTTON_FWD) {
if (!_bballIsGrabbed[palm.getSixenseID()]) {
// Look for a ball to grab
for (int i = 0; i < NUM_BBALLS; i++) {
glm::vec3 diff = _bballPosition[i] - fingerTipPosition;
penetration = glm::length(diff) - (_bballRadius[i] + COLLISION_RADIUS);
if (penetration < 0.f) {
_bballIsGrabbed[palm.getSixenseID()] = i;
}
}
}
if (_bballIsGrabbed[palm.getSixenseID()]) {
// If ball being grabbed, move with finger
glm::vec3 diff = _bballPosition[_bballIsGrabbed[palm.getSixenseID()]] - fingerTipPosition;
penetration = glm::length(diff) - (_bballRadius[_bballIsGrabbed[palm.getSixenseID()]] + COLLISION_RADIUS);
_bballPosition[_bballIsGrabbed[palm.getSixenseID()]] -= glm::normalize(diff) * penetration;
glm::vec3 fingerTipVelocity = palm.getTipVelocity();
if (_bballElement[_bballIsGrabbed[palm.getSixenseID()]] != 1) {
_bballVelocity[_bballIsGrabbed[palm.getSixenseID()]] = fingerTipVelocity;
}
_bballPosition[_bballIsGrabbed[palm.getSixenseID()]] = fingerTipPosition;
_bballColliding[_bballIsGrabbed[palm.getSixenseID()]] = 1.f;
}
} else {
_bballIsGrabbed[palm.getSixenseID()] = 0;
}
}
const float COLLISION_BLEND_RATE = 0.5f;
const float ATTRACTION_BLEND_RATE = 0.9f;
const float ATTRACTION_VELOCITY_BLEND_RATE = 0.10f;
void BuckyBalls::simulate(float deltaTime, const HandData* handData) {
// First, update the grab behavior from the hand controllers
for (size_t i = 0; i < handData->getNumPalms(); ++i) {
PalmData palm = handData->getPalms()[i];
grab(palm, deltaTime);
}
// Look for collisions
for (int i = 0; i < NUM_BBALLS; i++) {
if (_bballElement[i] != 1) {
// For 'interacting' elements, look for other balls to interact with
for (int j = 0; j < NUM_BBALLS; j++) {
if (i != j) {
glm::vec3 diff = _bballPosition[i] - _bballPosition[j];
float diffLength = glm::length(diff);
float penetration = diffLength - (_bballRadius[i] + _bballRadius[j]);
if (diffLength != 0) {
if (penetration < 0.f) {
// Colliding - move away and transfer velocity
_bballPosition[i] -= glm::normalize(diff) * penetration * COLLISION_BLEND_RATE;
if (glm::dot(_bballVelocity[i], diff) < 0.f) {
_bballVelocity[i] = _bballVelocity[i] * (1.f - COLLISION_BLEND_RATE) +
glm::reflect(_bballVelocity[i], glm::normalize(diff)) * COLLISION_BLEND_RATE;
}
} else if ((penetration > EPSILON) && (penetration < BBALLS_ATTRACTION_DISTANCE)) {
// If they get close to each other, bring them together with magnetic force
_bballPosition[i] -= glm::normalize(diff) * penetration * ATTRACTION_BLEND_RATE;
// Also make their velocities more similar
_bballVelocity[i] = _bballVelocity[i] * (1.f - ATTRACTION_VELOCITY_BLEND_RATE) + _bballVelocity[j] * ATTRACTION_VELOCITY_BLEND_RATE;
}
}
}
}
}
}
// Update position and bounce on walls
const float BBALL_CONTINUOUS_DAMPING = 0.00f;
const float BBALL_WALL_COLLISION_DAMPING = 0.2f;
const float COLLISION_DECAY_RATE = 0.8f;
for (int i = 0; i < NUM_BBALLS; i++) {
_bballPosition[i] += _bballVelocity[i] * deltaTime;
if (_bballElement[i] != 1) {
_bballVelocity[i].y += GRAVITY_BBALLS * deltaTime;
}
_bballVelocity[i] -= _bballVelocity[i] * BBALL_CONTINUOUS_DAMPING * deltaTime;
for (int j = 0; j < 3; j++) {
if ((_bballPosition[i][j] + _bballRadius[i]) > (CORNER_BBALLS + RANGE_BBALLS)) {
_bballPosition[i][j] = (CORNER_BBALLS + RANGE_BBALLS) - _bballRadius[i];
_bballVelocity[i][j] *= -(1.f - BBALL_WALL_COLLISION_DAMPING);
}
if ((_bballPosition[i][j] - _bballRadius[i]) < (CORNER_BBALLS -RANGE_BBALLS)) {
_bballPosition[i][j] = (CORNER_BBALLS -RANGE_BBALLS) + _bballRadius[i];
_bballVelocity[i][j] *= -(1.f - BBALL_WALL_COLLISION_DAMPING);
}
}
_bballColliding[i] *= COLLISION_DECAY_RATE;
if (_bballColliding[i] < 0.1f) {
_bballColliding[i] = 0.f;
}
}
}
void BuckyBalls::render() {
glEnable(GL_LIGHTING);
for (int i = 0; i < NUM_BBALLS; i++) {
if (_bballColliding[i] > 0.f) {
const float GRAB_BRIGHTEN = 1.15f;
glColor3f(_bballColor[i].x * GRAB_BRIGHTEN, _bballColor[i].y * GRAB_BRIGHTEN, _bballColor[i].z * GRAB_BRIGHTEN);
} else {
glColor3f(_bballColor[i].x, _bballColor[i].y, _bballColor[i].z);
}
glPushMatrix();
glTranslatef(_bballPosition[i].x, _bballPosition[i].y, _bballPosition[i].z);
Application::getInstance()->getGeometryCache()->renderSphere(_bballRadius[i], 15, 15);
glPopMatrix();
}
}

View file

@ -1,47 +0,0 @@
//
// BuckyBalls.h
// interface/src
//
// Created by Philip on 1/2/14.
// Copyright 2013 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_BuckyBalls_h
#define hifi_BuckyBalls_h
#include <iostream>
#include <glm/glm.hpp>
#include <HandData.h>
#include <SharedUtil.h>
#include "GeometryUtil.h"
#include "InterfaceConfig.h"
#include "Util.h"
const int NUM_BBALLS = 200;
class BuckyBalls {
public:
BuckyBalls();
void grab(PalmData& palm, float deltaTime);
void simulate(float deltaTime, const HandData* handData);
void render();
private:
glm::vec3 _bballPosition[NUM_BBALLS];
glm::vec3 _bballVelocity[NUM_BBALLS];
glm::vec3 _bballColor[NUM_BBALLS];
float _bballRadius[NUM_BBALLS];
float _bballColliding[NUM_BBALLS];
int _bballElement[NUM_BBALLS];
int _bballIsGrabbed[2];
};
#endif // hifi_BuckyBalls_h

View file

@ -348,7 +348,6 @@ Menu::Menu() :
addCheckableActionToQMenuAndActionHash(viewMenu, MenuOption::OffAxisProjection, 0, false);
addCheckableActionToQMenuAndActionHash(viewMenu, MenuOption::TurnWithHead, 0, false);
addCheckableActionToQMenuAndActionHash(viewMenu, MenuOption::MoveWithLean, 0, false);
addCheckableActionToQMenuAndActionHash(viewMenu, MenuOption::HeadMouse, 0, false);
@ -439,7 +438,7 @@ Menu::Menu() :
addCheckableActionToQMenuAndActionHash(entitiesDebugMenu, MenuOption::DisableLightEntities, 0, false);
addCheckableActionToQMenuAndActionHash(entitiesDebugMenu, MenuOption::DontReduceMaterialSwitches, 0, false);
addCheckableActionToQMenuAndActionHash(entitiesDebugMenu, MenuOption::DontRenderEntitiesAsScene, 0, false);
addCheckableActionToQMenuAndActionHash(entitiesDebugMenu, MenuOption::RenderEntitiesAsScene, 0, false);
QMenu* entityCullingMenu = entitiesDebugMenu->addMenu("Culling");
addCheckableActionToQMenuAndActionHash(entityCullingMenu, MenuOption::DontCullOutOfViewMeshParts, 0, false);
@ -722,7 +721,6 @@ Menu::Menu() :
connect(appInstance->getAudio(), SIGNAL(muteToggled()), this, SLOT(audioMuteToggled()));
QMenu* experimentalOptionsMenu = developerMenu->addMenu("Experimental");
addCheckableActionToQMenuAndActionHash(experimentalOptionsMenu, MenuOption::BuckyBalls, 0, false);
addCheckableActionToQMenuAndActionHash(experimentalOptionsMenu, MenuOption::StringHair, 0, false);
addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel,

View file

@ -364,7 +364,6 @@ namespace MenuOption {
const QString Bandwidth = "Bandwidth Display";
const QString BandwidthDetails = "Bandwidth Details";
const QString BlueSpeechSphere = "Blue Sphere While Speaking";
const QString BuckyBalls = "Bucky Balls";
const QString CascadedShadows = "Cascaded";
const QString Chat = "Chat...";
const QString ChatCircling = "Chat Circling";
@ -378,7 +377,6 @@ namespace MenuOption {
const QString DontCullOutOfViewMeshParts = "Don't Cull Out Of View Mesh Parts";
const QString DontCullTooSmallMeshParts = "Don't Cull Too Small Mesh Parts";
const QString DontReduceMaterialSwitches = "Don't Attempt to Reduce Material Switches";
const QString DontRenderEntitiesAsScene = "Don't Render Entities as Scene";
const QString DecreaseAvatarSize = "Decrease Avatar Size";
const QString DecreaseVoxelSize = "Decrease Voxel Size";
const QString DisableActivityLogger = "Disable Activity Logger";
@ -431,7 +429,6 @@ namespace MenuOption {
const QString MetavoxelEditor = "Metavoxel Editor...";
const QString Metavoxels = "Metavoxels";
const QString Mirror = "Mirror";
const QString MoveWithLean = "Move with Lean";
const QString MuteAudio = "Mute Microphone";
const QString MuteEnvironment = "Mute Environment";
const QString MyLocations = "My Locations...";
@ -450,6 +447,7 @@ namespace MenuOption {
const QString ReloadAllScripts = "Reload All Scripts";
const QString RenderBoundingCollisionShapes = "Show Bounding Collision Shapes";
const QString RenderDualContourSurfaces = "Render Dual Contour Surfaces";
const QString RenderEntitiesAsScene = "Render Entities as Scene";
const QString RenderFocusIndicator = "Show Eye Focus";
const QString RenderHeadCollisionShapes = "Show Head Collision Shapes";
const QString RenderHeightfields = "Render Heightfields";

View file

@ -145,11 +145,6 @@ void MyAvatar::update(float deltaTime) {
Head* head = getHead();
head->relaxLean(deltaTime);
updateFromTrackers(deltaTime);
if (Menu::getInstance()->isOptionChecked(MenuOption::MoveWithLean)) {
// Faceshift drive is enabled, set the avatar drive based on the head position
moveWithLean();
}
// Get audio loudness data from audio input device
Audio* audio = Application::getInstance()->getAudio();
head->setAudioLoudness(audio->getLastInputLoudness());
@ -374,36 +369,6 @@ void MyAvatar::updateFromTrackers(float deltaTime) {
-MAX_LEAN, MAX_LEAN));
}
void MyAvatar::moveWithLean() {
// Move with Lean by applying thrust proportional to leaning
Head* head = getHead();
glm::quat orientation = head->getCameraOrientation();
glm::vec3 front = orientation * IDENTITY_FRONT;
glm::vec3 right = orientation * IDENTITY_RIGHT;
float leanForward = head->getLeanForward();
float leanSideways = head->getLeanSideways();
// Degrees of 'dead zone' when leaning, and amount of acceleration to apply to lean angle
const float LEAN_FWD_DEAD_ZONE = 15.0f;
const float LEAN_SIDEWAYS_DEAD_ZONE = 10.0f;
const float LEAN_FWD_THRUST_SCALE = 4.0f;
const float LEAN_SIDEWAYS_THRUST_SCALE = 3.0f;
if (fabs(leanForward) > LEAN_FWD_DEAD_ZONE) {
if (leanForward > 0.0f) {
addThrust(front * -(leanForward - LEAN_FWD_DEAD_ZONE) * LEAN_FWD_THRUST_SCALE);
} else {
addThrust(front * -(leanForward + LEAN_FWD_DEAD_ZONE) * LEAN_FWD_THRUST_SCALE);
}
}
if (fabs(leanSideways) > LEAN_SIDEWAYS_DEAD_ZONE) {
if (leanSideways > 0.0f) {
addThrust(right * -(leanSideways - LEAN_SIDEWAYS_DEAD_ZONE) * LEAN_SIDEWAYS_THRUST_SCALE);
} else {
addThrust(right * -(leanSideways + LEAN_SIDEWAYS_DEAD_ZONE) * LEAN_SIDEWAYS_THRUST_SCALE);
}
}
}
void MyAvatar::renderDebugBodyPoints() {
glm::vec3 torsoPosition(getPosition());

View file

@ -47,7 +47,6 @@ public:
void update(float deltaTime);
void simulate(float deltaTime);
void updateFromTrackers(float deltaTime);
void moveWithLean();
void render(const glm::vec3& cameraPosition, RenderMode renderMode = NORMAL_RENDER_MODE, bool postLighting = false);
void renderBody(RenderMode renderMode, bool postLighting, float glowLevel = 0.0f);

View file

@ -253,7 +253,7 @@ void EntityTreeRenderer::checkEnterLeaveEntities() {
}
void EntityTreeRenderer::render(RenderArgs::RenderMode renderMode) {
bool dontRenderAsScene = Menu::getInstance()->isOptionChecked(MenuOption::DontRenderEntitiesAsScene);
bool dontRenderAsScene = !Menu::getInstance()->isOptionChecked(MenuOption::RenderEntitiesAsScene);
if (dontRenderAsScene) {
OctreeRenderer::render(renderMode);

View file

@ -172,7 +172,7 @@ void RenderableModelEntityItem::render(RenderArgs* args) {
// TODO: this is the majority of model render time. And rendering of a cube model vs the basic Box render
// is significantly more expensive. Is there a way to call this that doesn't cost us as much?
PerformanceTimer perfTimer("model->render");
bool dontRenderAsScene = Menu::getInstance()->isOptionChecked(MenuOption::DontRenderEntitiesAsScene);
bool dontRenderAsScene = !Menu::getInstance()->isOptionChecked(MenuOption::RenderEntitiesAsScene);
if (dontRenderAsScene) {
_model->render(alpha, modelRenderMode, args);
} else {

View file

@ -21,7 +21,11 @@ Batch::Batch() :
_commandOffsets(),
_params(),
_resources(),
_data(){
_data(),
_buffers(),
_streamFormats(),
_transforms()
{
}
Batch::~Batch() {
@ -32,8 +36,10 @@ void Batch::clear() {
_commandOffsets.clear();
_params.clear();
_resources.clear();
_buffers.clear();
_data.clear();
_buffers.clear();
_streamFormats.clear();
_transforms.clear();
}
uint32 Batch::cacheResource(Resource* res) {
@ -128,3 +134,22 @@ void Batch::setIndexBuffer(Type type, const BufferPointer& buffer, Offset offset
_params.push_back(_buffers.cache(buffer));
_params.push_back(type);
}
void Batch::setModelTransform(const TransformPointer& model) {
ADD_COMMAND(setModelTransform);
_params.push_back(_transforms.cache(model));
}
void Batch::setViewTransform(const TransformPointer& view) {
ADD_COMMAND(setViewTransform);
_params.push_back(_transforms.cache(view));
}
void Batch::setProjectionTransform(const TransformPointer& proj) {
ADD_COMMAND(setProjectionTransform);
_params.push_back(_transforms.cache(proj));
}

View file

@ -14,10 +14,10 @@
#include <assert.h>
#include "InterfaceConfig.h"
#include "Transform.h"
#include <vector>
#include "gpu/Format.h"
#include "gpu/Resource.h"
#include "gpu/Stream.h"
#if defined(NSIGHT_FOUND)
@ -50,6 +50,10 @@ enum Primitive {
NUM_PRIMITIVES,
};
typedef ::Transform Transform;
typedef QSharedPointer< ::gpu::Transform > TransformPointer;
typedef std::vector< TransformPointer > Transforms;
class Batch {
public:
typedef Stream::Slot Slot;
@ -60,11 +64,16 @@ public:
void clear();
// Drawcalls
void draw(Primitive primitiveType, uint32 numVertices, uint32 startVertex = 0);
void drawIndexed(Primitive primitiveType, uint32 nbIndices, uint32 startIndex = 0);
void drawInstanced(uint32 nbInstances, Primitive primitiveType, uint32 nbVertices, uint32 startVertex = 0, uint32 startInstance = 0);
void drawIndexedInstanced(uint32 nbInstances, Primitive primitiveType, uint32 nbIndices, uint32 startIndex = 0, uint32 startInstance = 0);
// Input Stage
// InputFormat
// InputBuffers
// IndexBuffer
void setInputFormat(const Stream::FormatPointer& format);
void setInputStream(Slot startChannel, const BufferStream& stream); // not a command, just unroll into a loop of setInputBuffer
@ -72,6 +81,16 @@ public:
void setIndexBuffer(Type type, const BufferPointer& buffer, Offset offset);
// Transform Stage
// Vertex position is transformed by ModelTransform from object space to world space,
// then by the inverse of the ViewTransform from world space to eye space,
// and finally projected into clip space by the projection transform.
// WARNING: ViewTransform transforms from eye space to world space; its inverse is composed
// with the ModelTransform to create the equivalent of the glModelViewMatrix.
void setModelTransform(const TransformPointer& model);
void setViewTransform(const TransformPointer& view);
void setProjectionTransform(const TransformPointer& proj);
// TODO: As long as we have gl calls explicitly issued from interface
// code, we need to be able to record and batch these calls. The long
@ -138,11 +157,13 @@ public:
COMMAND_drawIndexedInstanced,
COMMAND_setInputFormat,
COMMAND_setInputBuffer,
COMMAND_setIndexBuffer,
COMMAND_setModelTransform,
COMMAND_setViewTransform,
COMMAND_setProjectionTransform,
// TODO: As long as we have gl calls explicitly issued from interface
// code, we need to be able to record and batch these calls. The long
// term strategy is to get rid of any GL calls in favor of the HIFI GPU API
@ -266,6 +287,7 @@ public:
typedef Cache<Buffer>::Vector BufferCaches;
typedef Cache<Stream::Format>::Vector StreamFormatCaches;
typedef Cache<Transform>::Vector TransformCaches;
typedef unsigned char Byte;
typedef std::vector<Byte> Bytes;
@ -299,11 +321,12 @@ public:
CommandOffsets _commandOffsets;
Params _params;
Resources _resources;
Bytes _data;
BufferCaches _buffers;
StreamFormatCaches _streamFormats;
TransformCaches _transforms;
Bytes _data;
protected:
};
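Note: the Transform Stage comments above describe the path a vertex takes from object space to clip space. Written out as a single illustrative equation (notation mine, not from the commit), with M the ModelTransform, V the ViewTransform (eye space to world space) and P the projection transform:

v_{clip} = P \cdot V^{-1} \cdot M \cdot v_{object}

Because V maps eye space to world space, V^{-1} \cdot M plays the role of the classic modelview matrix, which is what GLBackend::updateTransform() composes via Transform::inverseMult() further down in this commit.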

View file

@ -24,11 +24,13 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
(&::gpu::GLBackend::do_drawIndexedInstanced),
(&::gpu::GLBackend::do_setInputFormat),
(&::gpu::GLBackend::do_setInputBuffer),
(&::gpu::GLBackend::do_setIndexBuffer),
(&::gpu::GLBackend::do_setModelTransform),
(&::gpu::GLBackend::do_setViewTransform),
(&::gpu::GLBackend::do_setProjectionTransform),
(&::gpu::GLBackend::do_glEnable),
(&::gpu::GLBackend::do_glDisable),
@ -111,18 +113,16 @@ static const GLenum _elementTypeToGLType[NUM_TYPES]= {
GLBackend::GLBackend() :
_needInputFormatUpdate(true),
_inputFormat(0),
_inputBuffersState(0),
_inputBuffers(_inputBuffersState.size(), BufferPointer(0)),
_inputBufferOffsets(_inputBuffersState.size(), 0),
_inputBufferStrides(_inputBuffersState.size(), 0),
_indexBuffer(0),
_indexBufferOffset(0),
_inputAttributeActivation(0)
_inputAttributeActivation(0),
_transform()
{
}
@ -183,6 +183,7 @@ void GLBackend::checkGLError() {
void GLBackend::do_draw(Batch& batch, uint32 paramOffset) {
updateInput();
updateTransform();
Primitive primitiveType = (Primitive)batch._params[paramOffset + 2]._uint;
GLenum mode = _primitiveToGLmode[primitiveType];
@ -195,6 +196,7 @@ void GLBackend::do_draw(Batch& batch, uint32 paramOffset) {
void GLBackend::do_drawIndexed(Batch& batch, uint32 paramOffset) {
updateInput();
updateTransform();
Primitive primitiveType = (Primitive)batch._params[paramOffset + 2]._uint;
GLenum mode = _primitiveToGLmode[primitiveType];
@ -425,6 +427,82 @@ void GLBackend::do_setIndexBuffer(Batch& batch, uint32 paramOffset) {
CHECK_GL_ERROR();
}
// Transform Stage
void GLBackend::do_setModelTransform(Batch& batch, uint32 paramOffset) {
TransformPointer modelTransform = batch._transforms.get(batch._params[paramOffset]._uint);
if (_transform._model.isNull() || (modelTransform != _transform._model)) {
_transform._model = modelTransform;
_transform._invalidModel = true;
}
}
void GLBackend::do_setViewTransform(Batch& batch, uint32 paramOffset) {
TransformPointer viewTransform = batch._transforms.get(batch._params[paramOffset]._uint);
if (_transform._view.isNull() || (viewTransform != _transform._view)) {
_transform._view = viewTransform;
_transform._invalidView = true;
}
}
void GLBackend::do_setProjectionTransform(Batch& batch, uint32 paramOffset) {
TransformPointer projectionTransform = batch._transforms.get(batch._params[paramOffset]._uint);
if (_transform._projection.isNull() || (projectionTransform != _transform._projection)) {
_transform._projection = projectionTransform;
_transform._invalidProj = true;
}
}
void GLBackend::updateTransform() {
if (_transform._invalidProj) {
// TODO: implement the projection matrix assignment to gl state
/* if (_transform._lastMode != GL_PROJECTION) {
glMatrixMode(GL_PROJECTION);
_transform._lastMode = GL_PROJECTION;
}
CHECK_GL_ERROR();*/
_transform._invalidProj = false;
}
if (_transform._invalidModel || _transform._invalidView) {
if (!_transform._model.isNull()) {
if (_transform._lastMode != GL_MODELVIEW) {
glMatrixMode(GL_MODELVIEW);
_transform._lastMode = GL_MODELVIEW;
}
Transform::Mat4 modelView;
if (!_transform._view.isNull()) {
Transform mvx;
Transform::inverseMult(mvx, (*_transform._view), (*_transform._model));
mvx.getMatrix(modelView);
} else {
_transform._model->getMatrix(modelView);
}
glLoadMatrixf(reinterpret_cast< const GLfloat* >(&modelView));
} else {
if (!_transform._view.isNull()) {
if (_transform._lastMode != GL_MODELVIEW) {
glMatrixMode(GL_MODELVIEW);
_transform._lastMode = GL_MODELVIEW;
}
Transform::Mat4 modelView;
_transform._view->getInverseMatrix(modelView);
glLoadMatrixf(reinterpret_cast< const GLfloat* >(&modelView));
} else {
// TODO: eventually do something about the matrix when neither view nor model is specified?
// glLoadIdentity();
}
}
CHECK_GL_ERROR();
_transform._invalidModel = false;
_transform._invalidView = false;
}
}
// TODO: As long as we have gl calls explicitly issued from interface
// code, we need to be able to record and batch these calls. The long
// term strategy is to get rid of any GL calls in favor of the HIFI GPU API

View file

@ -52,11 +52,22 @@ public:
protected:
// Draw Stage
void do_draw(Batch& batch, uint32 paramOffset);
void do_drawIndexed(Batch& batch, uint32 paramOffset);
void do_drawInstanced(Batch& batch, uint32 paramOffset);
void do_drawIndexedInstanced(Batch& batch, uint32 paramOffset);
// Input Stage
void do_setInputFormat(Batch& batch, uint32 paramOffset);
void do_setInputBuffer(Batch& batch, uint32 paramOffset);
void do_setIndexBuffer(Batch& batch, uint32 paramOffset);
void updateInput();
bool _needInputFormatUpdate;
Stream::FormatPointer _inputFormat;
typedef std::bitset<MAX_NUM_INPUT_BUFFERS> InputBuffersState;
InputBuffersState _inputBuffersState;
Buffers _inputBuffers;
Offsets _inputBufferOffsets;
Offsets _inputBufferStrides;
@ -68,18 +79,31 @@ protected:
typedef std::bitset<MAX_NUM_ATTRIBUTES> InputActivationCache;
InputActivationCache _inputAttributeActivation;
void do_draw(Batch& batch, uint32 paramOffset);
void do_drawIndexed(Batch& batch, uint32 paramOffset);
void do_drawInstanced(Batch& batch, uint32 paramOffset);
void do_drawIndexedInstanced(Batch& batch, uint32 paramOffset);
// Transform Stage
void do_setModelTransform(Batch& batch, uint32 paramOffset);
void do_setViewTransform(Batch& batch, uint32 paramOffset);
void do_setProjectionTransform(Batch& batch, uint32 paramOffset);
void updateInput();
void do_setInputFormat(Batch& batch, uint32 paramOffset);
void do_setInputBuffer(Batch& batch, uint32 paramOffset);
void updateTransform();
struct TransformStageState {
TransformPointer _model;
TransformPointer _view;
TransformPointer _projection;
bool _invalidModel;
bool _invalidView;
bool _invalidProj;
void do_setVertexBuffer(Batch& batch, uint32 paramOffset);
void do_setIndexBuffer(Batch& batch, uint32 paramOffset);
GLenum _lastMode;
TransformStageState() :
_model(0),
_view(0),
_projection(0),
_invalidModel(true),
_invalidView(true),
_invalidProj(true),
_lastMode(GL_TEXTURE) {}
} _transform;
// TODO: As long as we have gl calls explicitly issued from interface
// code, we need to be able to record and batch these calls. The long

View file

@ -561,8 +561,19 @@ bool Model::renderCore(float alpha, RenderMode mode, RenderArgs* args) {
PROFILE_RANGE(__FUNCTION__);
// Let's introduce a gpu::Batch to capture all the calls to the graphics api
gpu::Batch batch;
_renderBatch.clear();
gpu::Batch& batch = _renderBatch;
GLBATCH(glPushMatrix)();
// Capture the view matrix once for the rendering of this model
if (_transforms.empty()) {
_transforms.push_back(gpu::TransformPointer(new gpu::Transform()));
}
(*_transforms[0]) = gpu::Transform((*Application::getInstance()->getViewTransform()));
// apply entity translation offset to the viewTransform in one go (it's a preTranslate because viewTransform goes from world to eye space)
_transforms[0]->preTranslate(-_translation);
batch.setViewTransform(_transforms[0]);
GLBATCH(glDisable)(GL_COLOR_MATERIAL);
@ -687,11 +698,12 @@ bool Model::renderCore(float alpha, RenderMode mode, RenderArgs* args) {
GLBATCH(glBindBuffer)(GL_ELEMENT_ARRAY_BUFFER, 0);
GLBATCH(glBindTexture)(GL_TEXTURE_2D, 0);
GLBATCH(glPopMatrix)();
// Render!
{
PROFILE_RANGE("render Batch");
::gpu::GLBackend::renderBatch(batch);
batch.clear();
}
// restore all the default material settings
@ -1471,15 +1483,29 @@ void Model::deleteGeometry() {
// Scene rendering support
QVector<Model*> Model::_modelsInScene;
gpu::Batch Model::_sceneRenderBatch;
void Model::startScene() {
_modelsInScene.clear();
}
void Model::endScene(RenderMode mode, RenderArgs* args) {
void Model::setupBatchTransform(gpu::Batch& batch) {
GLBATCH(glPushMatrix)();
// Capture the view matrix once for the rendering of this model
if (_transforms.empty()) {
_transforms.push_back(gpu::TransformPointer(new gpu::Transform()));
}
(*_transforms[0]) = gpu::Transform((*Application::getInstance()->getViewTransform()));
_transforms[0]->preTranslate(-_translation);
batch.setViewTransform(_transforms[0]);
}
void Model::endScene(RenderMode mode, RenderArgs* args) {
PROFILE_RANGE(__FUNCTION__);
// first, do all the batch/GPU setup work....
// Let's introduce a gpu::Batch to capture all the calls to the graphics api
gpu::Batch batch;
_sceneRenderBatch.clear();
gpu::Batch& batch = _sceneRenderBatch;
GLBATCH(glDisable)(GL_COLOR_MATERIAL);
@ -1528,28 +1554,44 @@ void Model::endScene(RenderMode mode, RenderArgs* args) {
// now, for each model in the scene, render the mesh portions
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
opaqueMeshPartsRendered += model->renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, false, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
opaqueMeshPartsRendered += model->renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, false, true, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
opaqueMeshPartsRendered += model->renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, true, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
opaqueMeshPartsRendered += model->renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, true, true, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
opaqueMeshPartsRendered += model->renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, true, false, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
opaqueMeshPartsRendered += model->renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, true, false, true, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
opaqueMeshPartsRendered += model->renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, true, true, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
opaqueMeshPartsRendered += model->renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, true, true, true, args);
GLBATCH(glPopMatrix)();
}
// render translucent meshes afterwards
@@ -1565,28 +1607,44 @@ void Model::endScene(RenderMode mode, RenderArgs* args) {
int translucentParts = 0;
const float MOSTLY_OPAQUE_THRESHOLD = 0.75f;
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, false, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, false, true, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, true, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, true, true, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, true, false, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, true, false, true, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, true, true, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, true, true, true, args);
GLBATCH(glPopMatrix)();
}
GLBATCH(glDisable)(GL_ALPHA_TEST);
@@ -1605,28 +1663,44 @@ void Model::endScene(RenderMode mode, RenderArgs* args) {
if (mode == DEFAULT_RENDER_MODE || mode == DIFFUSE_RENDER_MODE) {
const float MOSTLY_TRANSPARENT_THRESHOLD = 0.0f;
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, false, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, false, true, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, true, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, true, true, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, true, false, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, true, false, true, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, true, true, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, true, true, true, args);
GLBATCH(glPopMatrix)();
}
}
@@ -1656,7 +1730,6 @@ void Model::endScene(RenderMode mode, RenderArgs* args) {
{
PROFILE_RANGE("render Batch");
::gpu::GLBackend::renderBatch(batch);
batch.clear();
}
// restore all the default material settings
@@ -2071,20 +2144,17 @@ int Model::renderMeshes(gpu::Batch& batch, RenderMode mode, bool translucent, fl
}
GLBATCH(glPushMatrix)();
//Application::getInstance()->loadTranslatedViewMatrix(_translation);
GLBATCH(glLoadMatrixf)((const GLfloat*)&Application::getInstance()->getUntranslatedViewMatrix());
glm::vec3 viewMatTranslation = Application::getInstance()->getViewMatrixTranslation();
GLBATCH(glTranslatef)(_translation.x + viewMatTranslation.x, _translation.y + viewMatTranslation.y,
_translation.z + viewMatTranslation.z);
const MeshState& state = _meshStates.at(i);
if (state.clusterMatrices.size() > 1) {
GLBATCH(glUniformMatrix4fv)(skinLocations->clusterMatrices, state.clusterMatrices.size(), false,
(const float*)state.clusterMatrices.constData());
batch.setModelTransform(gpu::TransformPointer());
} else {
GLBATCH(glMultMatrixf)((const GLfloat*)&state.clusterMatrices[0]);
gpu::TransformPointer modelTransform(new gpu::Transform(state.clusterMatrices[0]));
batch.setModelTransform(modelTransform);
}
if (mesh.blendshapes.isEmpty()) {
batch.setInputFormat(networkMesh._vertexFormat);
batch.setInputStream(0, *networkMesh._vertexStream);


@@ -16,6 +16,7 @@
#include <QObject>
#include <QUrl>
#include "Transform.h"
#include <AABox.h>
#include <AnimationCache.h>
#include <PhysicsEntity.h>
@@ -33,11 +34,9 @@ class Shape;
class RenderArgs;
class ViewFrustum;
namespace gpu {
class Batch;
}
#include "gpu/Stream.h"
#include "gpu/Batch.h"
/// A generic 3D model displaying geometry loaded from a URL.
class Model : public QObject, public PhysicsEntity {
@@ -284,7 +283,9 @@ private:
QUrl _url;
gpu::Buffers _blendedVertexBuffers;
gpu::Transforms _transforms;
gpu::Batch _renderBatch;
QVector<QVector<QSharedPointer<Texture> > > _dilatedTextures;
QVector<Model*> _attachments;
@@ -397,6 +398,8 @@ private:
// Scene rendering support
static QVector<Model*> _modelsInScene;
static gpu::Batch _sceneRenderBatch;
static void endSceneSimple(RenderMode mode = DEFAULT_RENDER_MODE, RenderArgs* args = NULL);
static void endSceneSplitPass(RenderMode mode = DEFAULT_RENDER_MODE, RenderArgs* args = NULL);
@@ -405,6 +408,8 @@ private:
bool renderCore(float alpha, RenderMode mode, RenderArgs* args);
int renderMeshes(gpu::Batch& batch, RenderMode mode, bool translucent, float alphaThreshold,
bool hasTangents, bool hasSpecular, bool isSkinned, RenderArgs* args = NULL);
void setupBatchTransform(gpu::Batch& batch);
};
Q_DECLARE_METATYPE(QPointer<Model>)


@@ -919,7 +919,9 @@ void SetSpannerTool::applyEdit(const AttributePointer& attribute, const SharedOb
Application::getInstance()->setupWorldLight();
Application::getInstance()->updateUntranslatedViewMatrix();
// TODO: assign an equivalent viewTransform object to the application to match the current path which uses glMatrixStack
// setViewTransform(viewTransform);
const glm::vec4 OPAQUE_WHITE(1.0f, 1.0f, 1.0f, 1.0f);
spannerData->getRenderer()->render(OPAQUE_WHITE, SpannerRenderer::DIFFUSE_MODE, glm::vec3(), 0.0f);


@@ -69,14 +69,16 @@ int TextRenderer::calculateHeight(const char* str) {
return maxHeight;
}
int TextRenderer::draw(int x, int y, const char* str) {
int TextRenderer::draw(int x, int y, const char* str, float alpha) {
// Grab the current color
float currentColor[4];
glGetFloatv(GL_CURRENT_COLOR, currentColor);
int compactColor = ((int( currentColor[0] * 255.f) & 0xFF)) |
((int( currentColor[1] * 255.f) & 0xFF) << 8) |
((int( currentColor[2] * 255.f) & 0xFF) << 16) |
((int( currentColor[3] * 255.f) & 0xFF) << 24);
alpha = std::max(0.f, std::min(alpha, 1.f));
currentColor[3] *= alpha;
int compactColor = ((int(currentColor[0] * 255.f) & 0xFF)) |
((int(currentColor[1] * 255.f) & 0xFF) << 8) |
((int(currentColor[2] * 255.f) & 0xFF) << 16) |
((int(currentColor[3] * 255.f) & 0xFF) << 24);
// TODO: Remove this code once we test for performance improvements
//glEnable(GL_TEXTURE_2D);
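// Illustrative sketch (not part of this change): the clamp-and-pack above folds
// the current GL color, scaled by the new alpha parameter, into one 32-bit RGBA
// value. The same packing with no GL dependency; the color array stands in for
// what glGetFloatv(GL_CURRENT_COLOR, ...) returns.

#include <algorithm>
#include <cstdint>

uint32_t packCompactColor(const float color[4], float alpha) {
    float a = std::max(0.0f, std::min(alpha, 1.0f)) * color[3];   // clamp, then scale the existing alpha
    return ((int(color[0] * 255.0f) & 0xFF)) |                    // red   -> bits 0..7
           ((int(color[1] * 255.0f) & 0xFF) << 8) |               // green -> bits 8..15
           ((int(color[2] * 255.0f) & 0xFF) << 16) |              // blue  -> bits 16..23
           ((int(a * 255.0f) & 0xFF) << 24);                      // alpha -> bits 24..31
}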


@@ -63,7 +63,7 @@ public:
int calculateHeight(const char* str);
// also returns the height of the tallest character
int draw(int x, int y, const char* str);
int draw(int x, int y, const char* str, float alpha = 1.f);
int computeWidth(char ch);
int computeWidth(const char* str);


@@ -112,7 +112,7 @@ void Text3DOverlay::render(RenderArgs* args) {
QStringList lines = _text.split("\n");
int lineOffset = maxHeight;
foreach(QString thisLine, lines) {
textRenderer->draw(0, lineOffset, qPrintable(thisLine));
textRenderer->draw(0, lineOffset, qPrintable(thisLine), alpha);
lineOffset += maxHeight;
}


@@ -82,7 +82,7 @@ void TextOverlay::render(RenderArgs* args) {
if (lineOffset == 0) {
lineOffset = textRenderer->calculateHeight(qPrintable(thisLine));
}
lineOffset += textRenderer->draw(x, y + lineOffset, qPrintable(thisLine));
lineOffset += textRenderer->draw(x, y + lineOffset, qPrintable(thisLine), alpha);
const int lineGap = 2;
lineOffset += lineGap;


@@ -13,9 +13,13 @@
#define hifi_AbstractAudioInterface_h
#include <QtCore/QObject>
#include <QtMultimedia/qaudiooutput.h>
#include "AudioInjectorOptions.h"
class AudioInjector;
class AudioInjectorLocalBuffer;
class AbstractAudioInterface : public QObject {
Q_OBJECT
public:
@@ -24,7 +28,7 @@ public:
virtual void startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen) = 0;
virtual void startDrumSound(float volume, float frequency, float duration, float decay) = 0;
public slots:
virtual void handleAudioByteArray(const QByteArray& audioByteArray, const AudioInjectorOptions& options) = 0;
virtual bool outputLocalInjector(bool isStereo, qreal volume, AudioInjector* injector) = 0;
};
Q_DECLARE_METATYPE(AbstractAudioInterface*)


@@ -21,6 +21,14 @@
#include "AudioInjector.h"
QScriptValue injectorToScriptValue(QScriptEngine* engine, AudioInjector* const& in) {
return engine->newQObject(in);
}
void injectorFromScriptValue(const QScriptValue& object, AudioInjector*& out) {
out = qobject_cast<AudioInjector*>(object.toQObject());
}
AudioInjector::AudioInjector(QObject* parent) :
QObject(parent),
_sound(NULL),
@@ -28,7 +36,8 @@ AudioInjector::AudioInjector(QObject* parent) :
_shouldStop(false),
_loudness(0.0f),
_isFinished(false),
_currentSendPosition(0)
_currentSendPosition(0),
_localBuffer(NULL)
{
}
@@ -38,10 +47,17 @@ AudioInjector::AudioInjector(Sound* sound, const AudioInjectorOptions& injectorO
_shouldStop(false),
_loudness(0.0f),
_isFinished(false),
_currentSendPosition(0)
_currentSendPosition(0),
_localBuffer(NULL)
{
}
AudioInjector::~AudioInjector() {
if (_localBuffer) {
_localBuffer->stop();
}
}
void AudioInjector::setOptions(AudioInjectorOptions& options) {
_options = options;
}
@@ -50,9 +66,54 @@ float AudioInjector::getLoudness() {
return _loudness;
}
void AudioInjector::injectAudio() {
if (_options.localOnly) {
injectLocally();
} else {
injectToMixer();
}
}
void AudioInjector::injectLocally() {
bool success = false;
if (_localAudioInterface) {
const QByteArray& soundByteArray = _sound->getByteArray();
if (soundByteArray.size() > 0) {
_localBuffer = new AudioInjectorLocalBuffer(_sound->getByteArray(), this);
_localBuffer->open(QIODevice::ReadOnly);
_localBuffer->setShouldLoop(_options.loop);
QMetaObject::invokeMethod(_localAudioInterface, "outputLocalInjector",
Qt::BlockingQueuedConnection,
Q_RETURN_ARG(bool, success),
Q_ARG(bool, _options.stereo),
Q_ARG(qreal, _options.volume),
Q_ARG(AudioInjector*, this));
if (!success) {
qDebug() << "AudioInjector::injectLocally could not output locally via _localAudioInterface";
}
} else {
qDebug() << "AudioInjector::injectLocally called without any data in Sound QByteArray";
}
} else {
qDebug() << "AudioInjector::injectLocally cannot inject locally with no local audio interface present.";
}
if (!success) {
// we never started, so we are already finished; call our stop method
stop();
}
}
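// Illustrative sketch (not part of this change): injectLocally relies on
// QMetaObject::invokeMethod with Qt::BlockingQueuedConnection so the slot runs
// on the audio interface's thread while this thread waits for the bool result.
// Worker and doWork below are hypothetical stand-ins, not the audio classes.

#include <QDebug>
#include <QMetaObject>
#include <QObject>
#include <QThread>

class Worker : public QObject {
    Q_OBJECT
public slots:
    bool doWork(qreal volume) {                      // runs on the thread Worker lives on
        qDebug() << "doWork on" << QThread::currentThread() << "volume" << volume;
        return volume > 0.0;
    }
};

bool invokeBlocking(Worker* worker) {
    // the worker must live on a different thread, otherwise this call deadlocks
    bool success = false;
    QMetaObject::invokeMethod(worker, "doWork",
                              Qt::BlockingQueuedConnection,   // wait for the slot to return
                              Q_RETURN_ARG(bool, success),
                              Q_ARG(qreal, 0.5));
    return success;
}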
const uchar MAX_INJECTOR_VOLUME = 0xFF;
void AudioInjector::injectAudio() {
void AudioInjector::injectToMixer() {
QByteArray soundByteArray = _sound->getByteArray();
if (_currentSendPosition < 0 ||
@@ -75,7 +136,7 @@ void AudioInjector::injectAudio() {
packetStream << QUuid::createUuid();
// pack the stereo/mono type of the stream
packetStream << _options.isStereo();
packetStream << _options.stereo;
// pack the flag for loopback
uchar loopbackFlag = (uchar) true;
@@ -83,13 +144,13 @@ void AudioInjector::injectAudio() {
// pack the position for injected audio
int positionOptionOffset = injectAudioPacket.size();
packetStream.writeRawData(reinterpret_cast<const char*>(&_options.getPosition()),
sizeof(_options.getPosition()));
packetStream.writeRawData(reinterpret_cast<const char*>(&_options.position),
sizeof(_options.position));
// pack our orientation for injected audio
int orientationOptionOffset = injectAudioPacket.size();
packetStream.writeRawData(reinterpret_cast<const char*>(&_options.getOrientation()),
sizeof(_options.getOrientation()));
packetStream.writeRawData(reinterpret_cast<const char*>(&_options.orientation),
sizeof(_options.orientation));
// pack zero for radius
float radius = 0;
@@ -97,23 +158,23 @@ void AudioInjector::injectAudio() {
// pack 255 for attenuation byte
int volumeOptionOffset = injectAudioPacket.size();
quint8 volume = MAX_INJECTOR_VOLUME * _options.getVolume();
quint8 volume = MAX_INJECTOR_VOLUME * _options.volume;
packetStream << volume;
packetStream << _options.ignorePenumbra();
packetStream << _options.ignorePenumbra;
QElapsedTimer timer;
timer.start();
int nextFrame = 0;
int numPreAudioDataBytes = injectAudioPacket.size();
bool shouldLoop = _options.getLoop();
bool shouldLoop = _options.loop;
// loop to send off our audio in NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL byte chunks per channel
quint16 outgoingInjectedAudioSequenceNumber = 0;
while (_currentSendPosition < soundByteArray.size() && !_shouldStop) {
int bytesToCopy = std::min(((_options.isStereo()) ? 2 : 1) * NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL,
int bytesToCopy = std::min(((_options.stereo) ? 2 : 1) * NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL,
soundByteArray.size() - _currentSendPosition);
// Measure the loudness of this frame
@@ -125,12 +186,12 @@ void AudioInjector::injectAudio() {
_loudness /= (float)(bytesToCopy / sizeof(int16_t));
memcpy(injectAudioPacket.data() + positionOptionOffset,
&_options.getPosition(),
sizeof(_options.getPosition()));
&_options.position,
sizeof(_options.position));
memcpy(injectAudioPacket.data() + orientationOptionOffset,
&_options.getOrientation(),
sizeof(_options.getOrientation()));
volume = MAX_INJECTOR_VOLUME * _options.getVolume();
&_options.orientation,
sizeof(_options.orientation));
volume = MAX_INJECTOR_VOLUME * _options.volume;
memcpy(injectAudioPacket.data() + volumeOptionOffset, &volume, sizeof(volume));
// resize the QByteArray to the right size
@@ -175,3 +236,13 @@ void AudioInjector::injectAudio() {
_isFinished = true;
emit finished();
}
void AudioInjector::stop() {
_shouldStop = true;
if (_options.localOnly) {
// we're only a local injector, so we can say we are finished right away too
_isFinished = true;
emit finished();
}
}


@@ -18,20 +18,29 @@
#include <glm/glm.hpp>
#include <glm/gtx/quaternion.hpp>
#include "AudioInjectorLocalBuffer.h"
#include "AudioInjectorOptions.h"
#include "Sound.h"
class AbstractAudioInterface;
class AudioInjector : public QObject {
Q_OBJECT
public:
AudioInjector(QObject* parent);
AudioInjector(Sound* sound, const AudioInjectorOptions& injectorOptions);
~AudioInjector();
bool isFinished() const { return _isFinished; }
int getCurrentSendPosition() const { return _currentSendPosition; }
AudioInjectorLocalBuffer* getLocalBuffer() const { return _localBuffer; }
bool isLocalOnly() const { return _options.localOnly; }
void setLocalAudioInterface(AbstractAudioInterface* localAudioInterface) { _localAudioInterface = localAudioInterface; }
public slots:
void injectAudio();
void stop() { _shouldStop = true; }
void stop();
void setOptions(AudioInjectorOptions& options);
void setCurrentSendPosition(int currentSendPosition) { _currentSendPosition = currentSendPosition; }
float getLoudness();
@@ -39,15 +48,22 @@ public slots:
signals:
void finished();
private:
void injectToMixer();
void injectLocally();
Sound* _sound;
AudioInjectorOptions _options;
bool _shouldStop;
float _loudness;
bool _isFinished;
int _currentSendPosition;
AbstractAudioInterface* _localAudioInterface;
AudioInjectorLocalBuffer* _localBuffer;
};
Q_DECLARE_METATYPE(AudioInjector*)
QScriptValue injectorToScriptValue(QScriptEngine* engine, AudioInjector* const& in);
void injectorFromScriptValue(const QScriptValue& object, AudioInjector*& out);
#endif // hifi_AudioInjector_h


@@ -0,0 +1,74 @@
//
// AudioInjectorLocalBuffer.cpp
// libraries/audio/src
//
// Created by Stephen Birarda on 2014-11-11.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "AudioInjectorLocalBuffer.h"
AudioInjectorLocalBuffer::AudioInjectorLocalBuffer(const QByteArray& rawAudioArray, QObject* parent) :
QIODevice(parent),
_rawAudioArray(rawAudioArray),
_shouldLoop(false),
_isStopped(false),
_currentOffset(0)
{
}
void AudioInjectorLocalBuffer::stop() {
_isStopped = true;
QIODevice::close();
}
qint64 AudioInjectorLocalBuffer::readData(char* data, qint64 maxSize) {
if (!_isStopped) {
// first, copy up to the end of the raw audio
int bytesToEnd = _rawAudioArray.size() - _currentOffset;
int bytesRead = maxSize;
if (maxSize > bytesToEnd) {
bytesRead = bytesToEnd;
}
memcpy(data, _rawAudioArray.data() + _currentOffset, bytesRead);
// now check if we are supposed to loop and if we can copy more from the beginning
if (_shouldLoop && maxSize != bytesRead) {
bytesRead += recursiveReadFromFront(data + bytesRead, maxSize - bytesRead);
} else {
_currentOffset += bytesRead;
}
return bytesRead;
} else {
return 0;
}
}
qint64 AudioInjectorLocalBuffer::recursiveReadFromFront(char* data, qint64 maxSize) {
// see how much we can get in this pass
int bytesRead = maxSize;
if (bytesRead > _rawAudioArray.size()) {
bytesRead = _rawAudioArray.size();
}
// copy that amount
memcpy(data, _rawAudioArray.data(), bytesRead);
// check if we need to recurse and pull more from the front
if (bytesRead < maxSize) {
return bytesRead + recursiveReadFromFront(data, maxSize);
} else {
_currentOffset = bytesRead;
return bytesRead;
}
}
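// Illustrative usage sketch (not part of this change): because the buffer is a
// QIODevice over the raw samples, a local audio interface can hand it straight
// to QAudioOutput and let readData() feed the device. The 24 kHz mono 16-bit
// PCM format below is an assumption for illustration only.

#include <QAudioFormat>
#include <QAudioOutput>
#include <QByteArray>
#include "AudioInjectorLocalBuffer.h"

QAudioOutput* playLocalBuffer(const QByteArray& rawAudio, QObject* parent) {
    QAudioFormat format;
    format.setSampleRate(24000);
    format.setChannelCount(1);
    format.setSampleSize(16);
    format.setCodec("audio/pcm");
    format.setByteOrder(QAudioFormat::LittleEndian);
    format.setSampleType(QAudioFormat::SignedInt);

    AudioInjectorLocalBuffer* buffer = new AudioInjectorLocalBuffer(rawAudio, parent);
    buffer->open(QIODevice::ReadOnly);
    buffer->setShouldLoop(false);

    QAudioOutput* output = new QAudioOutput(format, parent);
    output->start(buffer);                            // pulls audio through readData()
    return output;
}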


@@ -0,0 +1,40 @@
//
// AudioInjectorLocalBuffer.h
// libraries/audio/src
//
// Created by Stephen Birarda on 2014-11-11.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioInjectorLocalBuffer_h
#define hifi_AudioInjectorLocalBuffer_h
#include <QtCore/qiodevice.h>
class AudioInjectorLocalBuffer : public QIODevice {
Q_OBJECT
public:
AudioInjectorLocalBuffer(const QByteArray& rawAudioArray, QObject* parent);
void stop();
qint64 readData(char* data, qint64 maxSize);
qint64 writeData(const char* data, qint64 maxSize) { return 0; }
void setShouldLoop(bool shouldLoop) { _shouldLoop = shouldLoop; }
private:
qint64 recursiveReadFromFront(char* data, qint64 maxSize);
QByteArray _rawAudioArray;
bool _shouldLoop;
bool _isStopped;
int _currentOffset;
};
#endif // hifi_AudioInjectorLocalBuffer_h


@@ -9,33 +9,60 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <RegisteredMetaTypes.h>
#include "AudioInjectorOptions.h"
AudioInjectorOptions::AudioInjectorOptions(QObject* parent) :
QObject(parent),
_position(0.0f, 0.0f, 0.0f),
_volume(1.0f),
_loop(false),
_orientation(glm::vec3(0.0f, 0.0f, 0.0f)),
_isStereo(false),
_ignorePenumbra(false)
AudioInjectorOptions::AudioInjectorOptions() :
position(0.0f, 0.0f, 0.0f),
volume(1.0f),
loop(false),
orientation(glm::vec3(0.0f, 0.0f, 0.0f)),
stereo(false),
ignorePenumbra(false),
localOnly(false)
{
}
AudioInjectorOptions::AudioInjectorOptions(const AudioInjectorOptions& other) {
_position = other._position;
_volume = other._volume;
_loop = other._loop;
_orientation = other._orientation;
_isStereo = other._isStereo;
_ignorePenumbra = other._ignorePenumbra;
QScriptValue injectorOptionsToScriptValue(QScriptEngine* engine, const AudioInjectorOptions& injectorOptions) {
QScriptValue obj = engine->newObject();
obj.setProperty("position", vec3toScriptValue(engine, injectorOptions.position));
obj.setProperty("volume", injectorOptions.volume);
obj.setProperty("loop", injectorOptions.loop);
obj.setProperty("orientation", quatToScriptValue(engine, injectorOptions.orientation));
obj.setProperty("stereo", injectorOptions.stereo);
obj.setProperty("ignorePenumbra", injectorOptions.ignorePenumbra);
obj.setProperty("localOnly", injectorOptions.localOnly);
return obj;
}
void AudioInjectorOptions::operator=(const AudioInjectorOptions& other) {
_position = other._position;
_volume = other._volume;
_loop = other._loop;
_orientation = other._orientation;
_isStereo = other._isStereo;
_ignorePenumbra = other._ignorePenumbra;
}
void injectorOptionsFromScriptValue(const QScriptValue& object, AudioInjectorOptions& injectorOptions) {
if (object.property("position").isValid()) {
vec3FromScriptValue(object.property("position"), injectorOptions.position);
}
if (object.property("volume").isValid()) {
injectorOptions.volume = object.property("volume").toNumber();
}
if (object.property("loop").isValid()) {
injectorOptions.loop = object.property("loop").toBool();
}
if (object.property("orientation").isValid()) {
quatFromScriptValue(object.property("orientation"), injectorOptions.orientation);
}
if (object.property("stereo").isValid()) {
injectorOptions.stereo = object.property("stereo").toBool();
}
if (object.property("ignorePenumbra").isValid()) {
injectorOptions.ignorePenumbra = object.property("ignorePenumbra").toBool();
}
if (object.property("localOnly").isValid()) {
injectorOptions.localOnly = object.property("localOnly").toBool();
}
}


@@ -12,54 +12,26 @@
#ifndef hifi_AudioInjectorOptions_h
#define hifi_AudioInjectorOptions_h
#include <QtCore/QObject>
#include <QtScript/qscriptengine.h>
#include <glm/glm.hpp>
#include <glm/gtx/quaternion.hpp>
#include <RegisteredMetaTypes.h>
class AudioInjectorOptions : public QObject {
Q_OBJECT
Q_PROPERTY(glm::quat orientation READ getOrientation WRITE setOrientation)
Q_PROPERTY(glm::vec3 position READ getPosition WRITE setPosition)
Q_PROPERTY(float volume READ getVolume WRITE setVolume)
Q_PROPERTY(bool loop READ getLoop WRITE setLoop)
Q_PROPERTY(bool isStereo READ isStereo WRITE setIsStereo)
Q_PROPERTY(bool ignorePenumbra READ ignorePenumbra WRITE setIgnorePenumbra)
class AudioInjectorOptions {
public:
AudioInjectorOptions(QObject* parent = 0);
AudioInjectorOptions(const AudioInjectorOptions& other);
void operator=(const AudioInjectorOptions& other);
const glm::vec3& getPosition() const { return _position; }
void setPosition(const glm::vec3& position) { _position = position; }
float getVolume() const { return _volume; }
void setVolume(float volume) { _volume = volume; }
bool getLoop() const { return _loop; }
void setLoop(bool loop) { _loop = loop; }
const glm::quat& getOrientation() const { return _orientation; }
void setOrientation(const glm::quat& orientation) { _orientation = orientation; }
const bool isStereo() const { return _isStereo; }
void setIsStereo(const bool isStereo) { _isStereo = isStereo; }
const bool ignorePenumbra() const {return _ignorePenumbra; }
void setIgnorePenumbra(bool ignorePenumbra) { _ignorePenumbra = ignorePenumbra; }
private:
glm::vec3 _position;
float _volume;
bool _loop;
glm::quat _orientation;
bool _isStereo;
bool _ignorePenumbra;
AudioInjectorOptions();
glm::vec3 position;
float volume;
bool loop;
glm::quat orientation;
bool stereo;
bool ignorePenumbra;
bool localOnly;
};
Q_DECLARE_METATYPE(AudioInjectorOptions)
Q_DECLARE_METATYPE(AudioInjectorOptions);
QScriptValue injectorOptionsToScriptValue(QScriptEngine* engine, const AudioInjectorOptions& injectorOptions);
void injectorOptionsFromScriptValue(const QScriptValue& object, AudioInjectorOptions& injectorOptions);
#endif // hifi_AudioInjectorOptions_h


@@ -11,15 +11,20 @@
#include "AudioScriptingInterface.h"
void registerAudioMetaTypes(QScriptEngine* engine) {
qScriptRegisterMetaType(engine, injectorOptionsToScriptValue, injectorOptionsFromScriptValue);
qScriptRegisterMetaType(engine, soundToScriptValue, soundFromScriptValue);
}
AudioScriptingInterface& AudioScriptingInterface::getInstance() {
static AudioScriptingInterface staticInstance;
return staticInstance;
}
AudioScriptingInterface::AudioScriptingInterface() :
_localLoopbackInterface(NULL)
_localAudioInterface(NULL)
{
qRegisterMetaType<AudioInjectorOptions>("AudioInjectorOptions");
}
void AudioScriptingInterface::stopAllInjectors() {
@@ -37,24 +42,10 @@ void AudioScriptingInterface::stopAllInjectors() {
}
}
void AudioScriptingInterface::playLocalSound(Sound* sound, const AudioInjectorOptions* injectorOptions) {
if (sound->isStereo()) {
const_cast<AudioInjectorOptions*>(injectorOptions)->setIsStereo(true);
}
AudioInjector* AudioScriptingInterface::playSound(Sound* sound, const AudioInjectorOptions& injectorOptions) {
// assume that localAudioInterface could be on a separate thread, use Qt::AutoConnection to handle properly
QMetaObject::invokeMethod(_localLoopbackInterface, "handleAudioByteArray",
Qt::AutoConnection,
Q_ARG(QByteArray, sound->getByteArray()),
Q_ARG(const AudioInjectorOptions&, *injectorOptions));
}
AudioInjector* AudioScriptingInterface::playSound(Sound* sound, const AudioInjectorOptions* injectorOptions) {
if (sound->isStereo()) {
const_cast<AudioInjectorOptions*>(injectorOptions)->setIsStereo(true);
}
AudioInjector* injector = new AudioInjector(sound, *injectorOptions);
AudioInjector* injector = new AudioInjector(sound, injectorOptions);
injector->setLocalAudioInterface(_localAudioInterface);
QThread* injectorThread = new QThread();


@@ -18,8 +18,6 @@
#include "AudioInjector.h"
#include "Sound.h"
const AudioInjectorOptions DEFAULT_INJECTOR_OPTIONS;
class AudioScriptingInterface : public QObject {
Q_OBJECT
public:
@@ -27,13 +25,12 @@ public:
void stopAllInjectors();
void setLocalLoopbackInterface(AbstractAudioInterface* audioInterface) { _localLoopbackInterface = audioInterface; }
void setLocalAudioInterface(AbstractAudioInterface* audioInterface) { _localAudioInterface = audioInterface; }
public slots:
static float getLoudness(AudioInjector* injector);
void playLocalSound(Sound *sound, const AudioInjectorOptions* injectorOptions = NULL);
AudioInjector* playSound(Sound* sound, const AudioInjectorOptions* injectorOptions = NULL);
AudioInjector* playSound(Sound* sound, const AudioInjectorOptions& injectorOptions = AudioInjectorOptions());
void stopInjector(AudioInjector* injector);
bool isInjectorPlaying(AudioInjector* injector);
@@ -43,6 +40,9 @@ public slots:
private:
AudioScriptingInterface();
QList< QPointer<AudioInjector> > _activeInjectors;
AbstractAudioInterface* _localLoopbackInterface;
AbstractAudioInterface* _localAudioInterface;
};
void registerAudioMetaTypes(QScriptEngine* engine);
#endif // hifi_AudioScriptingInterface_h


@@ -29,6 +29,15 @@
#include "AudioEditBuffer.h"
#include "Sound.h"
QScriptValue soundToScriptValue(QScriptEngine* engine, Sound* const& in) {
return engine->newQObject(in);
}
void soundFromScriptValue(const QScriptValue& object, Sound*& out) {
out = qobject_cast<Sound*>(object.toQObject());
}
// procedural audio version of Sound
Sound::Sound(float volume, float frequency, float duration, float decay, QObject* parent) :
QObject(parent),


@@ -14,6 +14,7 @@
#include <QtCore/QObject>
#include <QtNetwork/QNetworkReply>
#include <QtScript/qscriptengine.h>
class Sound : public QObject {
Q_OBJECT
@@ -44,4 +45,9 @@ private slots:
void replyError(QNetworkReply::NetworkError code);
};
Q_DECLARE_METATYPE(Sound*)
QScriptValue soundToScriptValue(QScriptEngine* engine, Sound* const& in);
void soundFromScriptValue(const QScriptValue& object, Sound*& out);
#endif // hifi_Sound_h


@@ -166,8 +166,8 @@ void Player::pausePlayer() {
void Player::setupAudioThread() {
_audioThread = new QThread();
_options.setPosition(_avatar->getPosition());
_options.setOrientation(_avatar->getOrientation());
_options.position = _avatar->getPosition();
_options.orientation = _avatar->getOrientation();
_injector.reset(new AudioInjector(_recording->getAudio(), _options), &QObject::deleteLater);
_injector->moveToThread(_audioThread);
_audioThread->start();
@@ -292,8 +292,8 @@ void Player::play() {
qDebug() << "WARNING: Player couldn't find head data.";
}
_options.setPosition(_avatar->getPosition());
_options.setOrientation(_avatar->getOrientation());
_options.position = _avatar->getPosition();
_options.orientation = _avatar->getOrientation();
_injector->setOptions(_options);
}
@@ -360,7 +360,7 @@ void Player::setCurrentTime(int currentTime) {
}
void Player::setVolume(float volume) {
_options.setVolume(volume);
_options.volume = volume;
if (_injector) {
_injector->setOptions(_options);
}


@@ -74,14 +74,6 @@ void avatarDataFromScriptValue(const QScriptValue &object, AvatarData* &out) {
out = qobject_cast<AvatarData*>(object.toQObject());
}
QScriptValue injectorToScriptValue(QScriptEngine* engine, AudioInjector* const &in) {
return engine->newQObject(in);
}
void injectorFromScriptValue(const QScriptValue &object, AudioInjector* &out) {
out = qobject_cast<AudioInjector*>(object.toQObject());
}
QScriptValue inputControllerToScriptValue(QScriptEngine *engine, AbstractInputController* const &in) {
return engine->newQObject(in);
}
@@ -234,7 +226,6 @@ bool ScriptEngine::setScriptContents(const QString& scriptContents, const QStrin
return true;
}
Q_SCRIPT_DECLARE_QMETAOBJECT(AudioInjectorOptions, QObject*)
Q_SCRIPT_DECLARE_QMETAOBJECT(LocalVoxels, QString)
void ScriptEngine::init() {
@@ -254,6 +245,7 @@ void ScriptEngine::init() {
registerMenuItemProperties(this);
registerAnimationTypes(this);
registerAvatarTypes(this);
registerAudioMetaTypes(this);
Bitstream::registerTypes(this);
qScriptRegisterMetaType(this, EntityItemPropertiesToScriptValue, EntityItemPropertiesFromScriptValue);
@@ -275,9 +267,6 @@ void ScriptEngine::init() {
QScriptValue soundMetaObject = newQMetaObject(&Sound::staticMetaObject, soundConstructorValue);
globalObject().setProperty("Sound", soundMetaObject);
QScriptValue injectionOptionValue = scriptValueFromQMetaObject<AudioInjectorOptions>();
globalObject().setProperty("AudioInjectionOptions", injectionOptionValue);
QScriptValue localVoxelsValue = scriptValueFromQMetaObject<LocalVoxels>();
globalObject().setProperty("LocalVoxels", localVoxelsValue);


@@ -0,0 +1,71 @@
//
// Transform.cpp
// shared/src/gpu
//
// Created by Sam Gateau on 11/4/2014.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "Transform.h"
void Transform::evalRotationScale(Quat& rotation, Vec3& scale, const Mat3& rotationScaleMatrix) {
const float ACCURACY_THRESHOLD = 0.00001f;
// Following technique taken from:
// http://callumhay.blogspot.com/2010/10/decomposing-affine-transforms.html
// Extract the rotation component - this is done using polar decomposition, where
// we successively average the matrix with its inverse transpose until there is
// little or no difference between successive averages
float norm;
int count = 0;
Mat3 rotationMat = rotationScaleMatrix;
do {
Mat3 currInvTranspose = glm::inverse(glm::transpose(rotationMat));
Mat3 nextRotation = 0.5f * (rotationMat + currInvTranspose);
norm = 0.0;
for (int i = 0; i < 3; i++) {
float n = static_cast<float>(
fabs(rotationMat[0][i] - nextRotation[0][i]) +
fabs(rotationMat[1][i] - nextRotation[1][i]) +
fabs(rotationMat[2][i] - nextRotation[2][i]));
norm = (norm > n ? norm : n);
}
rotationMat = nextRotation;
} while (++count < 100 && norm > ACCURACY_THRESHOLD);
// extract the scale of the matrix from the diagonal of inverse(rotation) * rotationScale
Mat3 scaleMat = glm::inverse(rotationMat) * rotationScaleMatrix;
scale = glm::max(Vec3(ACCURACY_THRESHOLD), Vec3(scaleMat[0][0], scaleMat[1][1], scaleMat[2][2]));
// Let's work on a local matrix containing rotation only
Mat3 matRot(
rotationScaleMatrix[0] / scale.x,
rotationScaleMatrix[1] / scale.y,
rotationScaleMatrix[2] / scale.z);
// Beware!!! we need to detect the case where the scale is negative
// Based on the sign of the determinant we can flip the scale sign of one component: we choose the X axis
float determinant = glm::determinant(matRot);
if (determinant < 0.f) {
scale.x = -scale.x;
matRot[0] *= -1.f;
}
// Beware: even though the matRot is supposed to be normalized at that point,
// glm::quat_cast doesn't always return a normalized quaternion...
// rotation = glm::normalize(glm::quat_cast(matRot));
rotation = (glm::quat_cast(matRot));
}
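// Illustrative sketch (not part of this change): a quick way to sanity-check the
// polar decomposition above is to build a known rotation-scale matrix and confirm
// the factors round-trip. Assumes this Transform.h/.cpp pair is on the include path.

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
#include "Transform.h"

void checkEvalRotationScale() {
    glm::quat knownRotation = glm::angleAxis(glm::radians(30.0f), glm::vec3(0.0f, 1.0f, 0.0f));
    glm::vec3 knownScale(2.0f, 3.0f, 4.0f);

    glm::mat3 rotationScale = glm::mat3_cast(knownRotation);
    rotationScale[0] *= knownScale.x;                 // scale each basis column: R * S
    rotationScale[1] *= knownScale.y;
    rotationScale[2] *= knownScale.z;

    glm::quat rotation;
    glm::vec3 scale;
    Transform::evalRotationScale(rotation, scale, rotationScale);
    // expect rotation ~= knownRotation (up to quaternion sign) and scale ~= knownScale
}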


@@ -0,0 +1,397 @@
//
// Transform.h
// shared/src/gpu
//
// Created by Sam Gateau on 11/4/2014.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_Transform_h
#define hifi_gpu_Transform_h
#include <assert.h>
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtx/quaternion.hpp>
#include <bitset>
class Transform {
public:
typedef glm::mat4 Mat4;
typedef glm::mat3 Mat3;
typedef glm::vec4 Vec4;
typedef glm::vec3 Vec3;
typedef glm::vec2 Vec2;
typedef glm::quat Quat;
Transform() :
_translation(0),
_rotation(1.0f, 0, 0, 0),
_scale(1.0f),
_flags(FLAG_CACHE_INVALID_BITSET) // invalid cache
{
}
Transform(const Transform& transform) :
_translation(transform._translation),
_rotation(transform._rotation),
_scale(transform._scale),
_flags(transform._flags)
{
invalidCache();
}
Transform(const Mat4& raw) {
evalFromRawMatrix(raw);
}
~Transform() {}
void setIdentity();
const Vec3& getTranslation() const;
void setTranslation(const Vec3& translation);
void preTranslate(const Vec3& translation);
void postTranslate(const Vec3& translation);
const Quat& getRotation() const;
void setRotation(const Quat& rotation);
void preRotate(const Quat& rotation);
void postRotate(const Quat& rotation);
const Vec3& getScale() const;
void setScale(float scale);
void setScale(const Vec3& scale);
void postScale(float scale);
void postScale(const Vec3& scale);
bool isIdentity() const { return (_flags & ~Flags(FLAG_CACHE_INVALID_BITSET)).none(); }
bool isTranslating() const { return _flags[FLAG_TRANSLATION]; }
bool isRotating() const { return _flags[FLAG_ROTATION]; }
bool isScaling() const { return _flags[FLAG_SCALING]; }
bool isUniform() const { return !isNonUniform(); }
bool isNonUniform() const { return _flags[FLAG_NON_UNIFORM]; }
void evalFromRawMatrix(const Mat4& matrix);
void evalFromRawMatrix(const Mat3& rotationScalematrix);
Mat4& getMatrix(Mat4& result) const;
Mat4& getInverseMatrix(Mat4& result) const;
Transform& evalInverse(Transform& result) const;
static void evalRotationScale(Quat& rotation, Vec3& scale, const Mat3& rotationScaleMatrix);
static Transform& mult(Transform& result, const Transform& left, const Transform& right);
// Left will be inverted before the multiplication
static Transform& inverseMult(Transform& result, const Transform& left, const Transform& right);
protected:
enum Flag {
FLAG_CACHE_INVALID = 0,
FLAG_TRANSLATION,
FLAG_ROTATION,
FLAG_SCALING,
FLAG_NON_UNIFORM,
FLAG_ZERO_SCALE,
FLAG_PROJECTION,
NUM_FLAGS,
FLAG_CACHE_INVALID_BITSET = 1,
};
typedef std::bitset<NUM_FLAGS> Flags;
// TRS
Vec3 _translation;
Quat _rotation;
Vec3 _scale;
mutable Flags _flags;
// Cached transform
mutable Mat4 _matrix;
bool isCacheInvalid() const { return _flags[FLAG_CACHE_INVALID]; }
void validCache() const { _flags.set(FLAG_CACHE_INVALID, false); }
void invalidCache() const { _flags.set(FLAG_CACHE_INVALID, true); }
void flagTranslation() { _flags.set(FLAG_TRANSLATION, true); }
void flagRotation() { _flags.set(FLAG_ROTATION, true); }
void flagScaling() { _flags.set(FLAG_SCALING, true); }
void unflagScaling() { _flags.set(FLAG_SCALING, false); }
void flagUniform() { _flags.set(FLAG_NON_UNIFORM, false); }
void flagNonUniform() { _flags.set(FLAG_NON_UNIFORM, true); }
void updateCache() const;
};
inline void Transform::setIdentity() {
_translation = Vec3(0);
_rotation = Quat(1.0f, 0, 0, 0);
_scale = Vec3(1.0f);
_flags = Flags(FLAG_CACHE_INVALID_BITSET);
}
inline const Transform::Vec3& Transform::getTranslation() const {
return _translation;
}
inline void Transform::setTranslation(const Vec3& translation) {
invalidCache();
flagTranslation();
_translation = translation;
}
inline void Transform::preTranslate(const Vec3& translation) {
invalidCache();
flagTranslation();
_translation += translation;
}
inline void Transform::postTranslate(const Vec3& translation) {
invalidCache();
flagTranslation();
Vec3 scaledT = translation;
if (isScaling()) scaledT *= _scale;
if (isRotating()) {
_translation += glm::rotate(_rotation, scaledT);
} else {
_translation += scaledT;
}
}
inline const Transform::Quat& Transform::getRotation() const {
return _rotation;
}
inline void Transform::setRotation(const Quat& rotation) {
invalidCache();
flagRotation();
_rotation = rotation;
}
inline void Transform::preRotate(const Quat& rotation) {
invalidCache();
if (isRotating()) {
_rotation = rotation * _rotation;
} else {
_rotation = rotation;
}
flagRotation();
_translation = glm::rotate(rotation, _translation);
}
inline void Transform::postRotate(const Quat& rotation) {
invalidCache();
if (isNonUniform()) {
Quat newRot;
Vec3 newScale;
Mat3 scaleRot(glm::mat3_cast(rotation));
scaleRot[0] *= _scale;
scaleRot[1] *= _scale;
scaleRot[2] *= _scale;
evalRotationScale(newRot, newScale, scaleRot);
if (isRotating()) {
_rotation *= newRot;
} else {
_rotation = newRot;
}
setScale(newScale);
} else {
if (isRotating()) {
_rotation *= rotation;
} else {
_rotation = rotation;
}
}
flagRotation();
}
inline const Transform::Vec3& Transform::getScale() const {
return _scale;
}
inline void Transform::setScale(float scale) {
invalidCache();
flagUniform();
if (scale == 1.0f) {
unflagScaling();
} else {
flagScaling();
}
_scale = Vec3(scale);
}
inline void Transform::setScale(const Vec3& scale) {
if ((scale.x == scale.y) && (scale.x == scale.z)) {
setScale(scale.x);
} else {
invalidCache();
flagScaling();
flagNonUniform();
_scale = scale;
}
}
inline void Transform::postScale(float scale) {
if (scale == 1.0f) return;
if (isScaling()) {
// if already scaling, just invalidate the cache and apply the uniform scale
invalidCache();
_scale *= scale;
} else {
setScale(scale);
}
}
inline void Transform::postScale(const Vec3& scale) {
invalidCache();
if (isScaling()) {
_scale *= scale;
} else {
_scale = scale;
}
flagScaling();
}
inline Transform::Mat4& Transform::getMatrix(Transform::Mat4& result) const {
updateCache();
result = _matrix;
return result;
}
inline Transform::Mat4& Transform::getInverseMatrix(Transform::Mat4& result) const {
Transform inverse;
evalInverse(inverse);
return inverse.getMatrix(result);
}
inline void Transform::evalFromRawMatrix(const Mat4& matrix) {
// for now this works only in the case of a TRS transformation
if ((matrix[0][3] == 0) && (matrix[1][3] == 0) && (matrix[2][3] == 0) && (matrix[3][3] == 1.f)) {
setTranslation(Vec3(matrix[3]));
evalFromRawMatrix(Mat3(matrix));
}
}
inline void Transform::evalFromRawMatrix(const Mat3& rotationScaleMatrix) {
Quat rotation;
Vec3 scale;
evalRotationScale(rotation, scale, rotationScaleMatrix);
setRotation(rotation);
setScale(scale);
}
inline Transform& Transform::evalInverse(Transform& inverse) const {
inverse.setIdentity();
if (isScaling()) {
// TODO: At some point we will face the case when the scale is 0 and so 1/0 will blow up...
// What should we do for this one?
assert(_scale.x != 0);
assert(_scale.y != 0);
assert(_scale.z != 0);
if (isNonUniform()) {
inverse.setScale(Vec3(1.0f/_scale.x, 1.0f/_scale.y, 1.0f/_scale.z));
} else {
inverse.setScale(1.0f/_scale.x);
}
}
if (isRotating()) {
inverse.postRotate(glm::conjugate(_rotation));
}
if (isTranslating()) {
inverse.postTranslate(-_translation);
}
return inverse;
}
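// Illustrative sketch (not part of this change): evalInverse inverts scale,
// rotation, and translation in reverse order, so a transform's matrix times its
// inverse's matrix should land on the identity up to floating-point error.
// Assumes this header is included as "Transform.h"; a uniform scale keeps the
// check tight.

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
#include "Transform.h"

void checkEvalInverse() {
    Transform transform;
    transform.setTranslation(glm::vec3(1.0f, 2.0f, 3.0f));
    transform.setRotation(glm::angleAxis(glm::radians(45.0f), glm::vec3(0.0f, 0.0f, 1.0f)));
    transform.setScale(0.5f);

    Transform inverse;
    transform.evalInverse(inverse);

    glm::mat4 forward, backward;
    transform.getMatrix(forward);
    inverse.getMatrix(backward);

    glm::mat4 shouldBeIdentity = forward * backward;  // expect ~identity, up to float error
    (void) shouldBeIdentity;
}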
inline Transform& Transform::mult( Transform& result, const Transform& left, const Transform& right) {
result = left;
if (right.isTranslating()) {
result.postTranslate(right.getTranslation());
}
if (right.isRotating()) {
result.postRotate(right.getRotation());
}
if (right.isScaling()) {
result.postScale(right.getScale());
}
// HACK: if the Transform multiplication results look suspect, uncomment the next 2 lines
// and compare the two matrices, they should be the same...
// Transform::Mat4 mv = left.getMatrix() * right.getMatrix();
// Transform::Mat4 mv2 = result.getMatrix();
return result;
}
inline Transform& Transform::inverseMult( Transform& result, const Transform& left, const Transform& right) {
result.setIdentity();
if (left.isScaling()) {
const Vec3& s = left.getScale();
result.setScale(Vec3(1.0f / s.x, 1.0f / s.y, 1.0f / s.z));
}
if (left.isRotating()) {
result.postRotate(glm::conjugate(left.getRotation()));
}
if (left.isTranslating() || right.isTranslating()) {
result.postTranslate(right.getTranslation() - left.getTranslation());
}
if (right.isRotating()) {
result.postRotate(right.getRotation());
}
if (right.isScaling()) {
result.postScale(right.getScale());
}
// HACK: if the inverse-multiplication results look suspect, uncomment the next 2 lines
// and compare the two matrices, they should be the same...
// Transform::Mat4 mv = glm::inverse(left.getMatrix()) * right.getMatrix();
// Transform::Mat4 mv2 = result.getMatrix();
return result;
}
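// Illustrative sketch (not part of this change): the HACK comments in mult and
// inverseMult suggest checking the composed Transform against the raw matrix
// product; here is that check for mult. With uniform scales the two agree
// closely; non-uniform scale combined with rotation goes through the
// re-decomposition in postRotate and only matches approximately.

#include <glm/glm.hpp>
#include "Transform.h"

void compareMultWithMatrices(const Transform& left, const Transform& right) {
    Transform composed;
    Transform::mult(composed, left, right);

    glm::mat4 leftMat, rightMat, composedMat;
    left.getMatrix(leftMat);
    right.getMatrix(rightMat);
    composed.getMatrix(composedMat);

    glm::mat4 product = leftMat * rightMat;           // expect composedMat ~= product
    (void) product;
}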
inline void Transform::updateCache() const {
if (isCacheInvalid()) {
if (isRotating()) {
Mat3 rot = glm::mat3_cast(_rotation);
if (isScaling()) {
rot[0] *= _scale.x;
rot[1] *= _scale.y;
rot[2] *= _scale.z;
}
_matrix[0] = Vec4(rot[0], 0.f);
_matrix[1] = Vec4(rot[1], 0.f);
_matrix[2] = Vec4(rot[2], 0.f);
} else {
_matrix[0] = Vec4(_scale.x, 0.f, 0.f, 0.f);
_matrix[1] = Vec4(0.f, _scale.y, 0.f, 0.f);
_matrix[2] = Vec4(0.f, 0.f, _scale.z, 0.f);
}
_matrix[3] = Vec4(_translation, 1.0f);
validCache();
}
}
#endif
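// Illustrative usage sketch (not part of this change): updateCache makes
// getMatrix lazy - mutators only flip FLAG_CACHE_INVALID, and the 4x4 matrix is
// rebuilt on the next read, after which reads reuse the cached value.

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
#include "Transform.h"

void useCachedMatrix() {
    Transform transform;
    transform.setTranslation(glm::vec3(0.0f, 1.0f, 0.0f));
    transform.setRotation(glm::angleAxis(glm::radians(90.0f), glm::vec3(0.0f, 1.0f, 0.0f)));
    transform.setScale(2.0f);

    glm::mat4 matrix;
    transform.getMatrix(matrix);      // cache invalid: composes T * R * S here
    transform.getMatrix(matrix);      // cache valid: returns the stored matrix

    transform.postTranslate(glm::vec3(1.0f, 0.0f, 0.0f));   // invalidates the cache
    transform.getMatrix(matrix);      // rebuilds with the new translation
}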