Merge branch 'master' of https://github.com/worklist/hifi
This commit is contained in: commit 300708b3a2
59 changed files with 620 additions and 603 deletions
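Most of the script changes in this commit swap direct new Sound(url) construction for lookups through the shared SoundCache, and the C++ side registers SoundCache with the script engines while the gpu::Batch API moves from TransformPointer to plain Transform values. A minimal sketch of the new script-side pattern (the playback options shown are illustrative assumptions, not taken from the diff):

Script.include("libraries/globals.js");

// Before this commit: var sound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/220Sine.wav");
// After: fetch the shared, cached resource instead of constructing a new one.
var sound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/220Sine.wav");

function playOnce() {
    // Audio.playSound() is used exactly as before once the sound has loaded.
    Audio.playSound(sound, { position: MyAvatar.position, volume: 0.5 });
}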
@@ -23,6 +23,7 @@
#include <NodeList.h>
#include <PacketHeaders.h>
#include <ResourceCache.h>
#include <SoundCache.h>
#include <UUID.h>
#include <VoxelConstants.h>

@@ -217,6 +218,8 @@ void Agent::run() {
_scriptEngine.registerGlobalObject("Agent", this);

_scriptEngine.init(); // must be done before we set up the viewers

_scriptEngine.registerGlobalObject("SoundCache", &SoundCache::getInstance());

_scriptEngine.registerGlobalObject("VoxelViewer", &_voxelViewer);
// connect the VoxelViewer and the VoxelScriptingInterface to each other

@@ -774,7 +774,8 @@ void AudioMixer::run() {
nodeData->checkBuffersBeforeFrameSend();

// if the stream should be muted, send mute packet
if (shouldMute(nodeData->getAvatarAudioStream()->getQuietestFrameLoudness())) {
if (nodeData->getAvatarAudioStream()
&& shouldMute(nodeData->getAvatarAudioStream()->getQuietestFrameLoudness())) {
static const int TIME_BETWEEN_MUTES = 5; // in secs
if (usecTimestampNow() - nodeData->getAvatarAudioStream()->getLastMuted() >
TIME_BETWEEN_MUTES * USECS_PER_SECOND) {

@@ -34,22 +34,22 @@ var guitarModel = HIFI_PUBLIC_BUCKET + "models/attachments/guitar.fst";

var chords = new Array();
// Nylon string guitar
chords[1] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Nylon+A.raw");
chords[2] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Nylon+B.raw");
chords[3] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Nylon+E.raw");
chords[4] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Nylon+G.raw");
chords[1] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Nylon+A.raw");
chords[2] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Nylon+B.raw");
chords[3] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Nylon+E.raw");
chords[4] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Nylon+G.raw");

// Electric guitar
chords[5] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Metal+A+short.raw");
chords[6] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Metal+B+short.raw");
chords[7] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Metal+E+short.raw");
chords[8] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Metal+G+short.raw");
chords[5] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Metal+A+short.raw");
chords[6] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Metal+B+short.raw");
chords[7] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Metal+E+short.raw");
chords[8] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Metal+G+short.raw");

// Steel Guitar
chords[9] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Steel+A.raw");
chords[10] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Steel+B.raw");
chords[11] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Steel+E.raw");
chords[12] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Steel+G.raw");
chords[9] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Steel+A.raw");
chords[10] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Steel+B.raw");
chords[11] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Steel+E.raw");
chords[12] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Steel+G.raw");

var NUM_CHORDS = 4;
var NUM_GUITARS = 3;

@@ -15,7 +15,7 @@

Script.include("libraries/globals.js");

var sound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Animals/mexicanWhipoorwill.raw");
var sound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Animals/mexicanWhipoorwill.raw");
var CHANCE_OF_PLAYING_SOUND = 0.01;

var FACTOR = 0.05;

@@ -24,28 +24,28 @@ var audioOptions = {
}

var hitSounds = new Array();
hitSounds[0] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit1.raw");
hitSounds[1] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit2.raw");
hitSounds[2] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit3.raw");
hitSounds[3] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit4.raw");
hitSounds[4] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit5.raw");
hitSounds[5] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit6.raw");
hitSounds[6] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit7.raw");
hitSounds[7] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit8.raw");
hitSounds[8] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit9.raw");
hitSounds[9] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit10.raw");
hitSounds[10] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit11.raw");
hitSounds[11] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit12.raw");
hitSounds[12] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit13.raw");
hitSounds[13] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit14.raw");
hitSounds[14] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit15.raw");
hitSounds[15] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit16.raw");
hitSounds[16] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit17.raw");
hitSounds[17] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit18.raw");
hitSounds[18] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit19.raw");
hitSounds[19] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit20.raw");
hitSounds[20] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit21.raw");
hitSounds[21] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit22.raw");
hitSounds[0] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit1.raw");
hitSounds[1] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit2.raw");
hitSounds[2] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit3.raw");
hitSounds[3] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit4.raw");
hitSounds[4] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit5.raw");
hitSounds[5] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit6.raw");
hitSounds[6] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit7.raw");
hitSounds[7] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit8.raw");
hitSounds[8] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit9.raw");
hitSounds[9] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit10.raw");
hitSounds[10] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit11.raw");
hitSounds[11] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit12.raw");
hitSounds[12] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit13.raw");
hitSounds[13] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit14.raw");
hitSounds[14] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit15.raw");
hitSounds[15] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit16.raw");
hitSounds[16] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit17.raw");
hitSounds[17] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit18.raw");
hitSounds[18] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit19.raw");
hitSounds[19] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit20.raw");
hitSounds[20] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit21.raw");
hitSounds[21] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Collisions-hitsandslaps/Hit22.raw");

function playHitSound(mySessionID, theirSessionID, collision) {
var now = new Date();

@@ -67,7 +67,7 @@ function maybePlaySound(deltaTime) {
lifetime: 10
});
}

playing.push({ audioId: Audio.playSound(birds[whichBird].sound, options), entityId: entityId, lightId: lightId, color: birds[whichBird].color });
}
if (playing.length != numPlaying) {

@@ -159,8 +159,9 @@ function loadBirds() {
var SOUND_BASE_URL = "http://public.highfidelity.io/sounds/Animals/";

for (var i = 0; i < sound_filenames.length; i++) {
birds.push({ sound: new Sound(SOUND_BASE_URL + sound_filenames[i]),
color: colors[i]
} );
birds.push({
sound: SoundCache.getSound(SOUND_BASE_URL + sound_filenames[i]),
color: colors[i]
});
}
}

@@ -228,6 +228,6 @@ function loadSounds() {
var SOUND_BASE_URL = HIFI_PUBLIC_BUCKET + "sounds/Cocktail+Party+Snippets/Raws/";

for (var i = 0; i < sound_filenames.length; i++) {
sounds.push(new Sound(SOUND_BASE_URL + sound_filenames[i]));
sounds.push(SoundCache.getSound(SOUND_BASE_URL + sound_filenames[i]));
}
}

@@ -151,11 +151,11 @@ function loadSounds() {
var FOOTSTEP_BASE_URL = HIFI_PUBLIC_BUCKET + "sounds/Footsteps/";

for (var i = 0; i < sound_filenames.length; i++) {
sounds.push(new Sound(SOUND_BASE_URL + sound_filenames[i]));
sounds.push(SoundCache.getSound(SOUND_BASE_URL + sound_filenames[i]));
}

for (var i = 0; i < footstep_filenames.length; i++) {
footstepSounds.push(new Sound(FOOTSTEP_BASE_URL + footstep_filenames[i]));
footstepSounds.push(SoundCache.getSound(FOOTSTEP_BASE_URL + footstep_filenames[i]));
}
}

@@ -113,11 +113,11 @@ function loadSounds() {
var FOOTSTEP_BASE_URL = HIFI_PUBLIC_BUCKET + "sounds/Footsteps/";

for (var i = 0; i < sound_filenames.length; i++) {
sounds.push(new Sound(SOUND_BASE_URL + sound_filenames[i]));
sounds.push(SoundCache.getSound(SOUND_BASE_URL + sound_filenames[i]));
}

for (var i = 0; i < footstep_filenames.length; i++) {
footstepSounds.push(new Sound(FOOTSTEP_BASE_URL + footstep_filenames[i]));
footstepSounds.push(SoundCache.getSound(FOOTSTEP_BASE_URL + footstep_filenames[i]));
}
}

@@ -28,16 +28,16 @@ var lastClapFrame = 0;
var lastAnimFrame = 0;

var claps = [];
claps.push(new Sound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap1Rvb.wav"));
claps.push(new Sound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap2Rvb.wav"));
claps.push(new Sound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap3Rvb.wav"));
claps.push(new Sound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap4Rvb.wav"));
claps.push(new Sound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap5Rvb.wav"));
claps.push(new Sound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap6Rvb.wav"));
claps.push(new Sound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap7Rvb.wav"));
claps.push(new Sound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap8Rvb.wav"));
claps.push(new Sound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap9Rvb.wav"));
claps.push(new Sound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap10Rvb.wav"));
claps.push(SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap1Rvb.wav"));
claps.push(SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap2Rvb.wav"));
claps.push(SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap3Rvb.wav"));
claps.push(SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap4Rvb.wav"));
claps.push(SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap5Rvb.wav"));
claps.push(SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap6Rvb.wav"));
claps.push(SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap7Rvb.wav"));
claps.push(SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap8Rvb.wav"));
claps.push(SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap9Rvb.wav"));
claps.push(SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/claps/BClap10Rvb.wav"));
var numberOfSounds = claps.length;

var clappingNow = false;

@@ -28,8 +28,8 @@ function vMinus(a, b) {

// First, load two percussion sounds to be used on the sticks

var drum1 = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Drums/RackTomHi.raw");
var drum2 = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Drums/RackTomLo.raw");
var drum1 = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Drums/RackTomHi.raw");
var drum2 = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Drums/RackTomLo.raw");

// State Machine:
// 0 = not triggered

@@ -1125,12 +1125,12 @@ var toolBar = (function () {
browseModelsButton,
loadURLMenuItem,
loadFileMenuItem,
menuItemWidth = 125,
menuItemWidth,
menuItemOffset,
menuItemHeight,
menuItemMargin = 5,
menuTextColor = { red: 255, green: 255, blue: 255 },
menuBackgoundColor = { red: 18, green: 66, blue: 66 };
menuBackgroundColor = { red: 18, green: 66, blue: 66 };

function initialize() {
toolBar = new ToolBar(0, 0, ToolBar.VERTICAL);

@@ -1167,9 +1167,8 @@ var toolBar = (function () {
loadURLMenuItem = Overlays.addOverlay("text", {
x: newModelButton.x - menuItemWidth,
y: newModelButton.y + menuItemOffset,
width: menuItemWidth,
height: menuItemHeight,
backgroundColor: menuBackgoundColor,
backgroundColor: menuBackgroundColor,
topMargin: menuItemMargin,
text: "Model URL",
alpha: 0.9,

@@ -1179,15 +1178,19 @@ var toolBar = (function () {
loadFileMenuItem = Overlays.addOverlay("text", {
x: newModelButton.x - menuItemWidth,
y: newModelButton.y + menuItemOffset + menuItemHeight,
width: menuItemWidth,
height: menuItemHeight,
backgroundColor: menuBackgoundColor,
backgroundColor: menuBackgroundColor,
topMargin: menuItemMargin,
text: "Model File",
alpha: 0.9,
visible: false
});

menuItemWidth = Math.max(Overlays.textWidth(loadURLMenuItem, "Model URL"),
Overlays.textWidth(loadFileMenuItem, "Model File")) + 20;
Overlays.editOverlay(loadURLMenuItem, { width: menuItemWidth });
Overlays.editOverlay(loadFileMenuItem, { width: menuItemWidth });

newCubeButton = toolBar.addTool({
imageURL: toolIconUrl + "add-cube.svg",
subImage: { x: 0, y: Tool.IMAGE_WIDTH, width: Tool.IMAGE_WIDTH, height: Tool.IMAGE_HEIGHT },

@@ -78,7 +78,7 @@ function SoundArray() {
this.audioOptions = audioOptions
this.sounds = new Array();
this.addSound = function (soundURL) {
this.sounds[this.sounds.length] = new Sound(soundURL);
this.sounds[this.sounds.length] = SoundCache.getSound(soundURL);
}
this.play = function (index) {
if (0 <= index && index < this.sounds.length) {

@@ -70,23 +70,23 @@ function addBird()
var size;
var which = Math.random();
if (which < 0.2) {
tweet = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Animals/bushtit_1.raw");
tweet = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Animals/bushtit_1.raw");
color = { red: 100, green: 50, blue: 120 };
size = 0.08;
} else if (which < 0.4) {
tweet = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Animals/rosyfacedlovebird.raw");
tweet = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Animals/rosyfacedlovebird.raw");
color = { red: 100, green: 150, blue: 75 };
size = 0.09;
} else if (which < 0.6) {
tweet = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Animals/saysphoebe.raw");
tweet = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Animals/saysphoebe.raw");
color = { red: 84, green: 121, blue: 36 };
size = 0.05;
} else if (which < 0.8) {
tweet = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Animals/mexicanWhipoorwill.raw");
tweet = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Animals/mexicanWhipoorwill.raw");
color = { red: 23, green: 197, blue: 230 };
size = 0.12;
} else {
tweet = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Animals/westernscreechowl.raw");
tweet = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Animals/westernscreechowl.raw");
color = { red: 50, green: 67, blue: 144 };
size = 0.15;
}

@@ -16,7 +16,7 @@

this.preload = function(entityID) {
print("preload("+entityID.id+")");
bird = new Sound("http://s3.amazonaws.com/hifi-public/sounds/Animals/bushtit_1.raw");
bird = SoundCache.getSound("http://s3.amazonaws.com/hifi-public/sounds/Animals/bushtit_1.raw");
};

this.clickDownOnEntity = function(entityID, mouseEvent) {

@@ -23,7 +23,7 @@

this.preload = function(entityID) {
print("preload("+entityID.id+")");
bird = new Sound("http://s3.amazonaws.com/hifi-public/sounds/Animals/bushtit_1.raw");
bird = SoundCache.getSound("http://s3.amazonaws.com/hifi-public/sounds/Animals/bushtit_1.raw");
};

this.enterEntity = function(entityID) {

@@ -160,9 +160,9 @@ var rightMouseControl = new MouseControl("RIGHT");
var mouseControls = [leftMouseControl, middleMouseControl, rightMouseControl];
var currentMouseControl = false;

var newSound = new Sound("https://dl.dropboxusercontent.com/u/1864924/hifi-sounds/throw.raw");
var catchSound = new Sound("https://dl.dropboxusercontent.com/u/1864924/hifi-sounds/catch.raw");
var throwSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Switches%20and%20sliders/slider%20-%20whoosh1.raw");
var newSound = SoundCache.getSound("https://dl.dropboxusercontent.com/u/1864924/hifi-sounds/throw.raw");
var catchSound = SoundCache.getSound("https://dl.dropboxusercontent.com/u/1864924/hifi-sounds/catch.raw");
var throwSound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Switches%20and%20sliders/slider%20-%20whoosh1.raw");

var simulatedFrisbees = [];

@@ -36,11 +36,11 @@ var RELOAD_INTERVAL = 5;
var showScore = false;

// Load some sound to use for loading and firing
var fireSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guns/GUN-SHOT2.raw");
var loadSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guns/Gun_Reload_Weapon22.raw");
var impactSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guns/BulletImpact2.raw");
var targetHitSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/hit.raw");
var targetLaunchSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/shoot.raw");
var fireSound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Guns/GUN-SHOT2.raw");
var loadSound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Guns/Gun_Reload_Weapon22.raw");
var impactSound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Guns/BulletImpact2.raw");
var targetHitSound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/hit.raw");
var targetLaunchSound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/shoot.raw");

var gunModel = "http://public.highfidelity.io/models/attachments/HaloGun.fst";

@@ -35,11 +35,11 @@ var RELOAD_INTERVAL = 5;
var showScore = false;

// Load some sound to use for loading and firing
var fireSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guns/GUN-SHOT2.raw");
var loadSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guns/Gun_Reload_Weapon22.raw");
var impactSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guns/BulletImpact2.raw");
var targetHitSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/hit.raw");
var targetLaunchSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/shoot.raw");
var fireSound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Guns/GUN-SHOT2.raw");
var loadSound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Guns/Gun_Reload_Weapon22.raw");
var impactSound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Guns/BulletImpact2.raw");
var targetHitSound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/hit.raw");
var targetLaunchSound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/shoot.raw");

var gunModel = "http://public.highfidelity.io/models/attachments/HaloGun.fst";

@@ -70,7 +70,7 @@ function activateWarp() {

var WATCH_AVATAR_DISTANCE = 2.5;

var sound = new Sound("http://public.highfidelity.io/sounds/Footsteps/FootstepW2Right-12db.wav");
var sound = SoundCache.getSound("http://public.highfidelity.io/sounds/Footsteps/FootstepW2Right-12db.wav");
function playSound() {
Audio.playSound(sound, {
position: MyAvatar.position

@@ -13,7 +13,7 @@

Script.include("libraries/globals.js");

var sound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/220Sine.wav");
var sound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/220Sine.wav");

var soundPlaying = false;

@@ -335,12 +335,12 @@ walkAssets = (function () {

// read in the sounds
var _footsteps = [];
_footsteps.push(new Sound(_pathToSounds+"FootstepW2Left-12db.wav"));
_footsteps.push(new Sound(_pathToSounds+"FootstepW2Right-12db.wav"));
_footsteps.push(new Sound(_pathToSounds+"FootstepW3Left-12db.wav"));
_footsteps.push(new Sound(_pathToSounds+"FootstepW3Right-12db.wav"));
_footsteps.push(new Sound(_pathToSounds+"FootstepW5Left-12db.wav"));
_footsteps.push(new Sound(_pathToSounds+"FootstepW5Right-12db.wav"));
_footsteps.push(SoundCache.getSound(_pathToSounds+"FootstepW2Left-12db.wav"));
_footsteps.push(SoundCache.getSound(_pathToSounds+"FootstepW2Right-12db.wav"));
_footsteps.push(SoundCache.getSound(_pathToSounds+"FootstepW3Left-12db.wav"));
_footsteps.push(SoundCache.getSound(_pathToSounds+"FootstepW3Right-12db.wav"));
_footsteps.push(SoundCache.getSound(_pathToSounds+"FootstepW5Left-12db.wav"));
_footsteps.push(SoundCache.getSound(_pathToSounds+"FootstepW5Right-12db.wav"));

// load the animation datafiles
Script.include(pathToAssets+"animations/dd-female-standard-walk-animation.js");

@@ -39,9 +39,9 @@ var ORB_SHIFT = { x: 0, y: -1.4, z: -0.8};

var HELMET_ATTACHMENT_URL = HIFI_PUBLIC_BUCKET + "models/attachments/IronManMaskOnly.fbx"

var droneSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Lobby/drone.raw")
var droneSound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Lobby/drone.raw")
var currentDrone = null;

var latinSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Lobby/latin.raw")
var elevatorSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Lobby/elevator.raw")
var latinSound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Lobby/latin.raw")
var elevatorSound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Lobby/elevator.raw")
var currentMusak = null;

function reticlePosition() {

@@ -89,12 +89,12 @@ var toolBar = (function () {
browseModelsButton,
loadURLMenuItem,
loadFileMenuItem,
menuItemWidth = 125,
menuItemWidth,
menuItemOffset,
menuItemHeight,
menuItemMargin = 5,
menuTextColor = { red: 255, green: 255, blue: 255 },
menuBackgoundColor = { red: 18, green: 66, blue: 66 };
menuBackgroundColor = { red: 18, green: 66, blue: 66 };

function initialize() {
toolBar = new ToolBar(0, 0, ToolBar.VERTICAL);

@@ -131,9 +131,8 @@ var toolBar = (function () {
loadURLMenuItem = Overlays.addOverlay("text", {
x: newModelButton.x - menuItemWidth,
y: newModelButton.y + menuItemOffset,
width: menuItemWidth,
height: menuItemHeight,
backgroundColor: menuBackgoundColor,
backgroundColor: menuBackgroundColor,
topMargin: menuItemMargin,
text: "Model URL",
alpha: 0.9,

@@ -143,15 +142,19 @@ var toolBar = (function () {
loadFileMenuItem = Overlays.addOverlay("text", {
x: newModelButton.x - menuItemWidth,
y: newModelButton.y + menuItemOffset + menuItemHeight,
width: menuItemWidth,
height: menuItemHeight,
backgroundColor: menuBackgoundColor,
backgroundColor: menuBackgroundColor,
topMargin: menuItemMargin,
text: "Model File",
alpha: 0.9,
visible: false
});

menuItemWidth = Math.max(Overlays.textWidth(loadURLMenuItem, "Model URL"),
Overlays.textWidth(loadFileMenuItem, "Model File")) + 20;
Overlays.editOverlay(loadURLMenuItem, { width: menuItemWidth });
Overlays.editOverlay(loadFileMenuItem, { width: menuItemWidth });

newCubeButton = toolBar.addTool({
imageURL: toolIconUrl + "add-cube.svg",
subImage: { x: 0, y: Tool.IMAGE_WIDTH, width: Tool.IMAGE_WIDTH, height: Tool.IMAGE_HEIGHT },

@@ -11,7 +11,7 @@
Script.include("libraries/globals.js");

// First, load a sample sound from a URL
var bird = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Animals/bushtit_1.raw");
var bird = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Animals/bushtit_1.raw");

function maybePlaySound(deltaTime) {
if (Math.random() < 0.01) {

@@ -15,9 +15,9 @@ Script.include("libraries/globals.js");

// A few sample files you may want to try:

var sound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Nylon+A.raw");
//var sound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/220Sine.wav");
//var sound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Cocktail+Party+Snippets/Bandcamp.wav");
var sound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Guitars/Guitar+-+Nylon+A.raw");
//var sound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/220Sine.wav");
//var sound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Cocktail+Party+Snippets/Bandcamp.wav");

var soundPlaying = false;
var options = {

@@ -11,7 +11,7 @@

Script.include("libraries/globals.js");

var soundClip = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Voxels/voxel create 3.raw");
var soundClip = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Voxels/voxel create 3.raw");

var currentTime = 1.570079; // pi/2
var deltaTime = 0.05;

@@ -11,7 +11,7 @@

Script.include("libraries/globals.js");

var soundClip = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Cocktail%20Party%20Snippets/Walken1.wav");
var soundClip = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Cocktail%20Party%20Snippets/Walken1.wav");

function playSound() {
Audio.playSound(soundClip, {

@@ -23,7 +23,7 @@ var audioOptions = {

var injector = null;

var sound = new Sound(soundURL, audioOptions.isStereo);
var sound = SoundCache.getSound(soundURL, audioOptions.isStereo);

var entity = null;
var properties = null;

@@ -84,13 +84,13 @@ var missileFired = false;
var myMissile;

// sounds
var hitSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/hit.raw");
var shootSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/shoot.raw");
var hitSound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/hit.raw");
var shootSound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/shoot.raw");
var moveSounds = new Array();
moveSounds[0] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/Lo1.raw");
moveSounds[1] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/Lo2.raw");
moveSounds[2] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/Lo3.raw");
moveSounds[3] = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/Lo4.raw");
moveSounds[0] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/Lo1.raw");
moveSounds[1] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/Lo2.raw");
moveSounds[2] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/Lo3.raw");
moveSounds[3] = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Space%20Invaders/Lo4.raw");
var currentMoveSound = 0;
var numberOfSounds = 4;
var stepsPerSound = invaderStepsPerCycle / numberOfSounds;

@@ -39,9 +39,9 @@ var rightBallAlreadyInHand = false;
var leftHandEntity;
var rightHandEntity;

var newSound = new Sound("https://dl.dropboxusercontent.com/u/1864924/hifi-sounds/throw.raw");
var catchSound = new Sound("https://dl.dropboxusercontent.com/u/1864924/hifi-sounds/catch.raw");
var throwSound = new Sound(HIFI_PUBLIC_BUCKET + "sounds/Switches%20and%20sliders/slider%20-%20whoosh1.raw");
var newSound = SoundCache.getSound("https://dl.dropboxusercontent.com/u/1864924/hifi-sounds/throw.raw");
var catchSound = SoundCache.getSound("https://dl.dropboxusercontent.com/u/1864924/hifi-sounds/catch.raw");
var throwSound = SoundCache.getSound(HIFI_PUBLIC_BUCKET + "sounds/Switches%20and%20sliders/slider%20-%20whoosh1.raw");
var targetRadius = 1.0;

@@ -65,6 +65,7 @@
#include <PacketHeaders.h>
#include <PerfStat.h>
#include <ResourceCache.h>
#include <SoundCache.h>
#include <UserActivityLogger.h>
#include <UUID.h>

@@ -160,7 +161,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
_lastQueriedViewFrustum(),
_lastQueriedTime(usecTimestampNow()),
_mirrorViewRect(QRect(MIRROR_VIEW_LEFT_PADDING, MIRROR_VIEW_TOP_PADDING, MIRROR_VIEW_WIDTH, MIRROR_VIEW_HEIGHT)),
_viewTransform(new gpu::Transform()),
_viewTransform(),
_scaleMirror(1.0f),
_rotateMirror(0.0f),
_raiseMirror(0.0f),

@@ -2911,13 +2912,13 @@ void Application::displaySide(Camera& whichCamera, bool selfAvatarOnly) {
// Equivalent to what is happening with _untranslatedViewMatrix and the _viewMatrixTranslation
// the viewTransofmr object is updatded with the correct values and saved,
// this is what is used for rendering the Entities and avatars
gpu::Transform viewTransform;
Transform viewTransform;
viewTransform.setTranslation(whichCamera.getPosition());
viewTransform.setRotation(rotation);
viewTransform.postTranslate(eyeOffsetPos);
viewTransform.postRotate(eyeOffsetOrient);
if (whichCamera.getMode() == CAMERA_MODE_MIRROR) {
viewTransform.setScale(gpu::Transform::Vec3(-1.0f, 1.0f, 1.0f));
viewTransform.setScale(Transform::Vec3(-1.0f, 1.0f, 1.0f));
}
setViewTransform(viewTransform);

@@ -3117,8 +3118,8 @@ void Application::updateUntranslatedViewMatrix(const glm::vec3& viewMatrixTransl
_viewMatrixTranslation = viewMatrixTranslation;
}

void Application::setViewTransform(const gpu::Transform& view) {
(*_viewTransform) = view;
void Application::setViewTransform(const Transform& view) {
_viewTransform = view;
}

void Application::loadTranslatedViewMatrix(const glm::vec3& translation) {

@@ -3916,6 +3917,7 @@ void Application::registerScriptEngineWithApplicationServices(ScriptEngine* scri
scriptEngine->registerGlobalObject("Settings", SettingsScriptingInterface::getInstance());
scriptEngine->registerGlobalObject("AudioDevice", AudioDeviceScriptingInterface::getInstance());
scriptEngine->registerGlobalObject("AnimationCache", &_animationCache);
scriptEngine->registerGlobalObject("SoundCache", &SoundCache::getInstance());
scriptEngine->registerGlobalObject("AudioReflector", &_audioReflector);
scriptEngine->registerGlobalObject("Account", AccountScriptingInterface::getInstance());
scriptEngine->registerGlobalObject("Metavoxels", &_metavoxels);

@@ -232,8 +232,8 @@ public:
const glm::vec3& getViewMatrixTranslation() const { return _viewMatrixTranslation; }
void setViewMatrixTranslation(const glm::vec3& translation) { _viewMatrixTranslation = translation; }

const gpu::TransformPointer& getViewTransform() const { return _viewTransform; }
void setViewTransform(const gpu::Transform& view);
const Transform& getViewTransform() const { return _viewTransform; }
void setViewTransform(const Transform& view);

/// if you need to access the application settings, use lockSettings()/unlockSettings()
QSettings* lockSettings() { _settingsMutex.lock(); return _settings; }

@@ -526,7 +526,7 @@ private:
QRect _mirrorViewRect;
RearMirrorTools* _rearMirrorTools;

gpu::TransformPointer _viewTransform;
Transform _viewTransform;
glm::mat4 _untranslatedViewMatrix;
glm::vec3 _viewMatrixTranslation;
glm::mat4 _projectionMatrix;

@@ -135,19 +135,19 @@ void Batch::setIndexBuffer(Type type, const BufferPointer& buffer, Offset offset
_params.push_back(type);
}

void Batch::setModelTransform(const TransformPointer& model) {
void Batch::setModelTransform(const Transform& model) {
ADD_COMMAND(setModelTransform);

_params.push_back(_transforms.cache(model));
}

void Batch::setViewTransform(const TransformPointer& view) {
void Batch::setViewTransform(const Transform& view) {
ADD_COMMAND(setViewTransform);

_params.push_back(_transforms.cache(view));
}

void Batch::setProjectionTransform(const TransformPointer& proj) {
void Batch::setProjectionTransform(const Transform& proj) {
ADD_COMMAND(setProjectionTransform);

_params.push_back(_transforms.cache(proj));

@@ -50,10 +50,6 @@ enum Primitive {
NUM_PRIMITIVES,
};

typedef ::Transform Transform;
typedef QSharedPointer< ::gpu::Transform > TransformPointer;
typedef std::vector< TransformPointer > Transforms;

class Batch {
public:
typedef Stream::Slot Slot;

@@ -87,9 +83,9 @@ public:
// finaly projected into the clip space by the projection transform
// WARNING: ViewTransform transform from eye space to world space, its inverse is composed
// with the ModelTransformu to create the equivalent of the glModelViewMatrix
void setModelTransform(const TransformPointer& model);
void setViewTransform(const TransformPointer& view);
void setProjectionTransform(const TransformPointer& proj);
void setModelTransform(const Transform& model);
void setViewTransform(const Transform& view);
void setProjectionTransform(const Transform& proj);

// TODO: As long as we have gl calls explicitely issued from interface

@@ -258,35 +254,35 @@ public:
template <typename T>
class Cache {
public:
typedef QSharedPointer<T> Pointer;
Pointer _pointer;
Cache<T>(const Pointer& pointer) : _pointer(pointer) {}
typedef T Data;
Data _data;
Cache<T>(const Data& data) : _data(data) {}

class Vector {
public:
std::vector< Cache<T> > _pointers;
std::vector< Cache<T> > _items;

uint32 cache(const Pointer& pointer) {
uint32 offset = _pointers.size();
_pointers.push_back(Cache<T>(pointer));
uint32 cache(const Data& data) {
uint32 offset = _items.size();
_items.push_back(Cache<T>(data));
return offset;
}

Pointer get(uint32 offset) {
if (offset >= _pointers.size()) {
return Pointer();
Data get(uint32 offset) {
if (offset >= _items.size()) {
return Data();
}
return (_pointers.data() + offset)->_pointer;
return (_items.data() + offset)->_data;
}

void clear() {
_pointers.clear();
_items.clear();
}
};
};

typedef Cache<Buffer>::Vector BufferCaches;
typedef Cache<Stream::Format>::Vector StreamFormatCaches;

typedef Cache<BufferPointer>::Vector BufferCaches;
typedef Cache<Stream::FormatPointer>::Vector StreamFormatCaches;
typedef Cache<Transform>::Vector TransformCaches;

typedef unsigned char Byte;

@@ -113,15 +113,7 @@ static const GLenum _elementTypeToGLType[NUM_TYPES]= {

GLBackend::GLBackend() :
_needInputFormatUpdate(true),
_inputFormat(0),
_inputBuffersState(0),
_inputBuffers(_inputBuffersState.size(), BufferPointer(0)),
_inputBufferOffsets(_inputBuffersState.size(), 0),
_inputBufferStrides(_inputBuffersState.size(), 0),
_indexBuffer(0),
_indexBufferOffset(0),
_inputAttributeActivation(0),
_input(),
_transform()
{

@@ -203,9 +195,9 @@ void GLBackend::do_drawIndexed(Batch& batch, uint32 paramOffset) {
uint32 numIndices = batch._params[paramOffset + 1]._uint;
uint32 startIndex = batch._params[paramOffset + 0]._uint;

GLenum glType = _elementTypeToGLType[_indexBufferType];
GLenum glType = _elementTypeToGLType[_input._indexBufferType];

glDrawElements(mode, numIndices, glType, reinterpret_cast<GLvoid*>(startIndex + _indexBufferOffset));
glDrawElements(mode, numIndices, glType, reinterpret_cast<GLvoid*>(startIndex + _input._indexBufferOffset));
CHECK_GL_ERROR();
}

@@ -220,9 +212,9 @@ void GLBackend::do_drawIndexedInstanced(Batch& batch, uint32 paramOffset) {
void GLBackend::do_setInputFormat(Batch& batch, uint32 paramOffset) {
Stream::FormatPointer format = batch._streamFormats.get(batch._params[paramOffset]._uint);

if (format != _inputFormat) {
_inputFormat = format;
_needInputFormatUpdate = true;
if (format != _input._format) {
_input._format = format;
_input._invalidFormat = true;
}
}

@@ -233,10 +225,10 @@ void GLBackend::do_setInputBuffer(Batch& batch, uint32 paramOffset) {
uint32 channel = batch._params[paramOffset + 3]._uint;

if (channel < getNumInputBuffers()) {
_inputBuffers[channel] = buffer;
_inputBufferOffsets[channel] = offset;
_inputBufferStrides[channel] = stride;
_inputBuffersState.set(channel);
_input._buffers[channel] = buffer;
_input._bufferOffsets[channel] = offset;
_input._bufferStrides[channel] = stride;
_input._buffersState.set(channel);
}
}

@@ -252,14 +244,14 @@ static const GLenum attributeSlotToClassicAttribName[NUM_CLASSIC_ATTRIBS] = {
#endif

void GLBackend::updateInput() {
if (_needInputFormatUpdate || _inputBuffersState.any()) {
if (_input._invalidFormat || _input._buffersState.any()) {

if (_needInputFormatUpdate) {
InputActivationCache newActivation;
if (_input._invalidFormat) {
InputStageState::ActivationCache newActivation;

// Check expected activation
if (_inputFormat) {
const Stream::Format::AttributeMap& attributes = _inputFormat->getAttributes();
if (_input._format) {
const Stream::Format::AttributeMap& attributes = _input._format->getAttributes();
for (Stream::Format::AttributeMap::const_iterator it = attributes.begin(); it != attributes.end(); it++) {
const Stream::Attribute& attrib = (*it).second;
newActivation.set(attrib._slot);

@@ -269,7 +261,7 @@ void GLBackend::updateInput() {
// Manage Activation what was and what is expected now
for (unsigned int i = 0; i < newActivation.size(); i++) {
bool newState = newActivation[i];
if (newState != _inputAttributeActivation[i]) {
if (newState != _input._attributeActivation[i]) {
#if defined(SUPPORT_LEGACY_OPENGL)
if (i < NUM_CLASSIC_ATTRIBS) {
if (newState) {

@@ -290,31 +282,31 @@ void GLBackend::updateInput() {
}
CHECK_GL_ERROR();

_inputAttributeActivation.flip(i);
_input._attributeActivation.flip(i);
}
}
}

// now we need to bind the buffers and assign the attrib pointers
if (_inputFormat) {
const Buffers& buffers = _inputBuffers;
const Offsets& offsets = _inputBufferOffsets;
const Offsets& strides = _inputBufferStrides;
if (_input._format) {
const Buffers& buffers = _input._buffers;
const Offsets& offsets = _input._bufferOffsets;
const Offsets& strides = _input._bufferStrides;

const Stream::Format::AttributeMap& attributes = _inputFormat->getAttributes();
const Stream::Format::AttributeMap& attributes = _input._format->getAttributes();

for (Stream::Format::ChannelMap::const_iterator channelIt = _inputFormat->getChannels().begin();
channelIt != _inputFormat->getChannels().end();
for (Stream::Format::ChannelMap::const_iterator channelIt = _input._format->getChannels().begin();
channelIt != _input._format->getChannels().end();
channelIt++) {
const Stream::Format::ChannelMap::value_type::second_type& channel = (*channelIt).second;
if ((*channelIt).first < buffers.size()) {
int bufferNum = (*channelIt).first;

if (_inputBuffersState.test(bufferNum) || _needInputFormatUpdate) {
if (_input._buffersState.test(bufferNum) || _input._invalidFormat) {
GLuint vbo = gpu::GLBackend::getBufferID((*buffers[bufferNum]));
glBindBuffer(GL_ARRAY_BUFFER, vbo);
CHECK_GL_ERROR();
_inputBuffersState[bufferNum] = false;
_input._buffersState[bufferNum] = false;

for (unsigned int i = 0; i < channel._slots.size(); i++) {
const Stream::Attribute& attrib = attributes.at(channel._slots[i]);

@@ -354,7 +346,7 @@ void GLBackend::updateInput() {
}
}
// everything format related should be in sync now
_needInputFormatUpdate = false;
_input._invalidFormat = false;
}

/* TODO: Fancy version GL4.4

@@ -415,10 +407,10 @@ void GLBackend::updateInput() {

void GLBackend::do_setIndexBuffer(Batch& batch, uint32 paramOffset) {
_indexBufferType = (Type) batch._params[paramOffset + 2]._uint;
_input._indexBufferType = (Type) batch._params[paramOffset + 2]._uint;
BufferPointer indexBuffer = batch._buffers.get(batch._params[paramOffset + 1]._uint);
_indexBufferOffset = batch._params[paramOffset + 0]._uint;
_indexBuffer = indexBuffer;
_input._indexBufferOffset = batch._params[paramOffset + 0]._uint;
_input._indexBuffer = indexBuffer;
if (indexBuffer) {
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, getBufferID(*indexBuffer));
} else {

@@ -430,30 +422,18 @@ void GLBackend::do_setIndexBuffer(Batch& batch, uint32 paramOffset) {
// Transform Stage

void GLBackend::do_setModelTransform(Batch& batch, uint32 paramOffset) {
TransformPointer modelTransform = batch._transforms.get(batch._params[paramOffset]._uint);

if (_transform._model.isNull() || (modelTransform != _transform._model)) {
_transform._model = modelTransform;
_transform._invalidModel = true;
}
_transform._model = batch._transforms.get(batch._params[paramOffset]._uint);
_transform._invalidModel = true;
}

void GLBackend::do_setViewTransform(Batch& batch, uint32 paramOffset) {
TransformPointer viewTransform = batch._transforms.get(batch._params[paramOffset]._uint);

if (_transform._view.isNull() || (viewTransform != _transform._view)) {
_transform._view = viewTransform;
_transform._invalidView = true;
}
_transform._view = batch._transforms.get(batch._params[paramOffset]._uint);
_transform._invalidView = true;
}

void GLBackend::do_setProjectionTransform(Batch& batch, uint32 paramOffset) {
TransformPointer projectionTransform = batch._transforms.get(batch._params[paramOffset]._uint);

if (_transform._projection.isNull() || (projectionTransform != _transform._projection)) {
_transform._projection = projectionTransform;
_transform._invalidProj = true;
}
_transform._projection = batch._transforms.get(batch._params[paramOffset]._uint);
_transform._invalidProj = true;
}

void GLBackend::updateTransform() {

@@ -468,28 +448,28 @@ void GLBackend::updateTransform() {
}

if (_transform._invalidModel || _transform._invalidView) {
if (!_transform._model.isNull()) {
if (!_transform._model.isIdentity()) {
if (_transform._lastMode != GL_MODELVIEW) {
glMatrixMode(GL_MODELVIEW);
_transform._lastMode = GL_MODELVIEW;
}
Transform::Mat4 modelView;
if (!_transform._view.isNull()) {
if (!_transform._view.isIdentity()) {
Transform mvx;
Transform::inverseMult(mvx, (*_transform._view), (*_transform._model));
Transform::inverseMult(mvx, _transform._view, _transform._model);
mvx.getMatrix(modelView);
} else {
_transform._model->getMatrix(modelView);
_transform._model.getMatrix(modelView);
}
glLoadMatrixf(reinterpret_cast< const GLfloat* >(&modelView));
} else {
if (!_transform._view.isNull()) {
if (!_transform._view.isIdentity()) {
if (_transform._lastMode != GL_MODELVIEW) {
glMatrixMode(GL_MODELVIEW);
_transform._lastMode = GL_MODELVIEW;
}
Transform::Mat4 modelView;
_transform._view->getInverseMatrix(modelView);
_transform._view.getInverseMatrix(modelView);
glLoadMatrixf(reinterpret_cast< const GLfloat* >(&modelView));
} else {
// TODO: eventually do something about the matrix when neither view nor model is specified?

@@ -48,7 +48,7 @@ public:
static const int MAX_NUM_ATTRIBUTES = Stream::NUM_INPUT_SLOTS;
static const int MAX_NUM_INPUT_BUFFERS = 16;

uint32 getNumInputBuffers() const { return _inputBuffersState.size(); }
uint32 getNumInputBuffers() const { return _input._buffersState.size(); }

protected:

@@ -62,22 +62,39 @@ protected:
void do_setInputFormat(Batch& batch, uint32 paramOffset);
void do_setInputBuffer(Batch& batch, uint32 paramOffset);
void do_setIndexBuffer(Batch& batch, uint32 paramOffset);

void updateInput();
bool _needInputFormatUpdate;
Stream::FormatPointer _inputFormat;
typedef std::bitset<MAX_NUM_INPUT_BUFFERS> InputBuffersState;
InputBuffersState _inputBuffersState;
struct InputStageState {
bool _invalidFormat;
Stream::FormatPointer _format;

Buffers _inputBuffers;
Offsets _inputBufferOffsets;
Offsets _inputBufferStrides;
typedef std::bitset<MAX_NUM_INPUT_BUFFERS> BuffersState;
BuffersState _buffersState;

BufferPointer _indexBuffer;
Offset _indexBufferOffset;
Type _indexBufferType;
Buffers _buffers;
Offsets _bufferOffsets;
Offsets _bufferStrides;

typedef std::bitset<MAX_NUM_ATTRIBUTES> InputActivationCache;
InputActivationCache _inputAttributeActivation;
BufferPointer _indexBuffer;
Offset _indexBufferOffset;
Type _indexBufferType;

typedef std::bitset<MAX_NUM_ATTRIBUTES> ActivationCache;
ActivationCache _attributeActivation;

InputStageState() :
_invalidFormat(true),
_format(0),
_buffersState(0),
_buffers(_buffersState.size(), BufferPointer(0)),
_bufferOffsets(_buffersState.size(), 0),
_bufferStrides(_buffersState.size(), 0),
_indexBuffer(0),
_indexBufferOffset(0),
_indexBufferType(UINT32),
_attributeActivation(0)
{}
} _input;

// Transform Stage
void do_setModelTransform(Batch& batch, uint32 paramOffset);

@@ -86,9 +103,9 @@ protected:

void updateTransform();
struct TransformStageState {
TransformPointer _model;
TransformPointer _view;
TransformPointer _projection;
Transform _model;
Transform _view;
Transform _projection;
bool _invalidModel;
bool _invalidView;
bool _invalidProj;

@@ -96,9 +113,9 @@ protected:
GLenum _lastMode;

TransformStageState() :
_model(0),
_view(0),
_projection(0),
_model(),
_view(),
_projection(),
_invalidModel(true),
_invalidView(true),
_invalidProj(true),

@@ -567,11 +567,11 @@ bool Model::renderCore(float alpha, RenderMode mode, RenderArgs* args) {

// Capture the view matrix once for the rendering of this model
if (_transforms.empty()) {
_transforms.push_back(gpu::TransformPointer(new gpu::Transform()));
_transforms.push_back(Transform());
}
(*_transforms[0]) = gpu::Transform((*Application::getInstance()->getViewTransform()));
_transforms[0] = Application::getInstance()->getViewTransform();
// apply entity translation offset to the viewTransform in one go (it's a preTranslate because viewTransform goes from world to eye space)
_transforms[0]->preTranslate(-_translation);
_transforms[0].preTranslate(-_translation);

batch.setViewTransform(_transforms[0]);

@@ -1493,10 +1493,10 @@ void Model::setupBatchTransform(gpu::Batch& batch) {

// Capture the view matrix once for the rendering of this model
if (_transforms.empty()) {
_transforms.push_back(gpu::TransformPointer(new gpu::Transform()));
_transforms.push_back(Transform());
}
(*_transforms[0]) = gpu::Transform((*Application::getInstance()->getViewTransform()));
_transforms[0]->preTranslate(-_translation);
_transforms[0] = Application::getInstance()->getViewTransform();
_transforms[0].preTranslate(-_translation);
batch.setViewTransform(_transforms[0]);
}

@@ -1553,46 +1553,14 @@ void Model::endScene(RenderMode mode, RenderArgs* args) {
int opaqueMeshPartsRendered = 0;

// now, for each model in the scene, render the mesh portions
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
opaqueMeshPartsRendered += model->renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, false, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
opaqueMeshPartsRendered += model->renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, false, true, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
opaqueMeshPartsRendered += model->renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, true, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
opaqueMeshPartsRendered += model->renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, true, true, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
opaqueMeshPartsRendered += model->renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, true, false, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
opaqueMeshPartsRendered += model->renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, true, false, true, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
opaqueMeshPartsRendered += model->renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, true, true, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
opaqueMeshPartsRendered += model->renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, true, false, true, args);
GLBATCH(glPopMatrix)();
}
opaqueMeshPartsRendered += renderMeshesForModelsInScene(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, false, false, args);
opaqueMeshPartsRendered += renderMeshesForModelsInScene(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, false, true, args);
opaqueMeshPartsRendered += renderMeshesForModelsInScene(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, true, false, args);
opaqueMeshPartsRendered += renderMeshesForModelsInScene(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, true, true, args);
opaqueMeshPartsRendered += renderMeshesForModelsInScene(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, true, false, false, args);
opaqueMeshPartsRendered += renderMeshesForModelsInScene(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, true, false, true, args);
opaqueMeshPartsRendered += renderMeshesForModelsInScene(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, true, true, false, args);
opaqueMeshPartsRendered += renderMeshesForModelsInScene(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, true, true, true, args);

// render translucent meshes afterwards
//Application::getInstance()->getTextureCache()->setPrimaryDrawBuffers(false, true, true);

@@ -1606,46 +1574,14 @@ void Model::endScene(RenderMode mode, RenderArgs* args) {

int translucentParts = 0;
const float MOSTLY_OPAQUE_THRESHOLD = 0.75f;
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, false, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, false, true, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, true, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, true, true, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, true, false, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, true, false, true, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, true, true, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, true, false, true, args);
GLBATCH(glPopMatrix)();
}
translucentParts += renderMeshesForModelsInScene(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, false, false, args);
translucentParts += renderMeshesForModelsInScene(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, false, true, args);
translucentParts += renderMeshesForModelsInScene(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, true, false, args);
translucentParts += renderMeshesForModelsInScene(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, true, true, args);
translucentParts += renderMeshesForModelsInScene(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, true, false, false, args);
translucentParts += renderMeshesForModelsInScene(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, true, false, true, args);
translucentParts += renderMeshesForModelsInScene(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, true, true, false, args);
translucentParts += renderMeshesForModelsInScene(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, true, true, true, args);

GLBATCH(glDisable)(GL_ALPHA_TEST);
GLBATCH(glEnable)(GL_BLEND);
@ -1662,46 +1598,14 @@ void Model::endScene(RenderMode mode, RenderArgs* args) {
if (mode == DEFAULT_RENDER_MODE || mode == DIFFUSE_RENDER_MODE) {
const float MOSTLY_TRANSPARENT_THRESHOLD = 0.0f;
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, false, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, false, true, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, true, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, true, true, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, true, false, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, true, false, true, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, true, true, false, args);
GLBATCH(glPopMatrix)();
}
foreach(Model* model, _modelsInScene) {
model->setupBatchTransform(batch);
translucentParts += model->renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, true, false, true, args);
GLBATCH(glPopMatrix)();
}
translucentParts += renderMeshesForModelsInScene(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, false, false, args);
translucentParts += renderMeshesForModelsInScene(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, false, true, args);
translucentParts += renderMeshesForModelsInScene(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, true, false, args);
translucentParts += renderMeshesForModelsInScene(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, true, true, args);
translucentParts += renderMeshesForModelsInScene(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, true, false, false, args);
translucentParts += renderMeshesForModelsInScene(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, true, false, true, args);
translucentParts += renderMeshesForModelsInScene(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, true, true, false, args);
translucentParts += renderMeshesForModelsInScene(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, true, true, true, args);
}

GLBATCH(glDepthMask)(true);
@ -1981,19 +1885,8 @@ void Model::segregateMeshGroups() {
_meshGroupsKnown = true;
}

int Model::renderMeshes(gpu::Batch& batch, RenderMode mode, bool translucent, float alphaThreshold,
bool hasTangents, bool hasSpecular, bool isSkinned, RenderArgs* args) {

QVector<int>* Model::pickMeshList(bool translucent, float alphaThreshold, bool hasTangents, bool hasSpecular, bool isSkinned) {
PROFILE_RANGE(__FUNCTION__);
bool dontCullOutOfViewMeshParts = Menu::getInstance()->isOptionChecked(MenuOption::DontCullOutOfViewMeshParts);
bool cullTooSmallMeshParts = !Menu::getInstance()->isOptionChecked(MenuOption::DontCullTooSmallMeshParts);
bool dontReduceMaterialSwitches = Menu::getInstance()->isOptionChecked(MenuOption::DontReduceMaterialSwitches);

QString lastMaterialID;
int meshPartsRendered = 0;
updateVisibleJointStates();
const FBXGeometry& geometry = _geometry->getFBXGeometry();
const QVector<NetworkMesh>& networkMeshes = _geometry->getMeshes();

// depending on which parameters we were called with, pick the correct mesh group to render
QVector<int>* whichList = NULL;
@ -2032,23 +1925,18 @@ int Model::renderMeshes(gpu::Batch& batch, RenderMode mode, bool translucent, fl
} else {
qDebug() << "unexpected!!! this mesh didn't fall into any or our groups???";
}

if (!whichList) {
qDebug() << "unexpected!!! we don't know which list of meshes to render...";
return 0;
}
QVector<int>& list = *whichList;

// If this list has nothing to render, then don't bother proceeding. This saves us on binding to programs
if (list.size() == 0) {
return 0;
}
return whichList;
}

void Model::pickPrograms(gpu::Batch& batch, RenderMode mode, bool translucent, float alphaThreshold,
bool hasTangents, bool hasSpecular, bool isSkinned, RenderArgs* args,
SkinLocations*& skinLocations, GLenum& specularTextureUnit) {

ProgramObject* program = &_program;
Locations* locations = &_locations;
ProgramObject* skinProgram = &_skinProgram;
SkinLocations* skinLocations = &_skinLocations;
GLenum specularTextureUnit = 0;
skinLocations = &_skinLocations;
specularTextureUnit = 0;
if (mode == SHADOW_RENDER_MODE) {
program = &_shadowProgram;
skinProgram = &_skinShadowProgram;
@ -2091,8 +1979,84 @@ int Model::renderMeshes(gpu::Batch& batch, RenderMode mode, bool translucent, fl
if (!activeProgram->isLinked()) {
activeProgram->link();
}

GLBATCH(glUseProgram)(activeProgram->programId());
GLBATCH(glUniform1f)(activeLocations->alphaThreshold, alphaThreshold);
}

int Model::renderMeshesForModelsInScene(gpu::Batch& batch, RenderMode mode, bool translucent, float alphaThreshold,
bool hasTangents, bool hasSpecular, bool isSkinned, RenderArgs* args) {

PROFILE_RANGE(__FUNCTION__);
int meshPartsRendered = 0;

bool pickProgramsNeeded = true;
SkinLocations* skinLocations;
GLenum specularTextureUnit;

foreach(Model* model, _modelsInScene) {
QVector<int>* whichList = model->pickMeshList(translucent, alphaThreshold, hasTangents, hasSpecular, isSkinned);
if (whichList) {
QVector<int>& list = *whichList;
if (list.size() > 0) {
if (pickProgramsNeeded) {
pickPrograms(batch, mode, translucent, alphaThreshold, hasTangents, hasSpecular, isSkinned, args, skinLocations, specularTextureUnit);
pickProgramsNeeded = false;
}
model->setupBatchTransform(batch);
meshPartsRendered += model->renderMeshesFromList(list, batch, mode, translucent, alphaThreshold, args, skinLocations, specularTextureUnit);
GLBATCH(glPopMatrix)();
}
}
}
// if we selected a program, then unselect it
if (!pickProgramsNeeded) {
GLBATCH(glUseProgram)(0);
}
return meshPartsRendered;
}

int Model::renderMeshes(gpu::Batch& batch, RenderMode mode, bool translucent, float alphaThreshold,
bool hasTangents, bool hasSpecular, bool isSkinned, RenderArgs* args) {

PROFILE_RANGE(__FUNCTION__);
int meshPartsRendered = 0;

QVector<int>* whichList = pickMeshList(translucent, alphaThreshold, hasTangents, hasSpecular, isSkinned);

if (!whichList) {
qDebug() << "unexpected!!! we don't know which list of meshes to render...";
return 0;
}
QVector<int>& list = *whichList;

// If this list has nothing to render, then don't bother proceeding. This saves us on binding to programs
if (list.size() == 0) {
return 0;
}

SkinLocations* skinLocations;
GLenum specularTextureUnit;
pickPrograms(batch, mode, translucent, alphaThreshold, hasTangents, hasSpecular, isSkinned, args, skinLocations, specularTextureUnit);
meshPartsRendered = renderMeshesFromList(list, batch, mode, translucent, alphaThreshold, args, skinLocations, specularTextureUnit);
GLBATCH(glUseProgram)(0);

return meshPartsRendered;
}

int Model::renderMeshesFromList(QVector<int>& list, gpu::Batch& batch, RenderMode mode, bool translucent, float alphaThreshold, RenderArgs* args,
SkinLocations* skinLocations, GLenum specularTextureUnit) {
PROFILE_RANGE(__FUNCTION__);
bool dontCullOutOfViewMeshParts = Menu::getInstance()->isOptionChecked(MenuOption::DontCullOutOfViewMeshParts);
bool cullTooSmallMeshParts = !Menu::getInstance()->isOptionChecked(MenuOption::DontCullTooSmallMeshParts);
bool dontReduceMaterialSwitches = Menu::getInstance()->isOptionChecked(MenuOption::DontReduceMaterialSwitches);

QString lastMaterialID;
int meshPartsRendered = 0;
updateVisibleJointStates();
const FBXGeometry& geometry = _geometry->getFBXGeometry();
const QVector<NetworkMesh>& networkMeshes = _geometry->getMeshes();

// i is the "index" from the original networkMeshes QVector...
foreach (int i, list) {
@ -2149,10 +2113,9 @@ int Model::renderMeshes(gpu::Batch& batch, RenderMode mode, bool translucent, fl
if (state.clusterMatrices.size() > 1) {
GLBATCH(glUniformMatrix4fv)(skinLocations->clusterMatrices, state.clusterMatrices.size(), false,
(const float*)state.clusterMatrices.constData());
batch.setModelTransform(gpu::TransformPointer());
batch.setModelTransform(Transform());
} else {
gpu::TransformPointer modelTransform(new gpu::Transform(state.clusterMatrices[0]));
batch.setModelTransform(modelTransform);
batch.setModelTransform(Transform(state.clusterMatrices[0]));
}

if (mesh.blendshapes.isEmpty()) {
@ -2268,7 +2231,5 @@ int Model::renderMeshes(gpu::Batch& batch, RenderMode mode, bool translucent, fl
}

GLBATCH(glUseProgram)(0);

return meshPartsRendered;
}

@ -283,7 +283,7 @@ private:
QUrl _url;

gpu::Buffers _blendedVertexBuffers;
gpu::Transforms _transforms;
std::vector<Transform> _transforms;
gpu::Batch _renderBatch;

QVector<QVector<QSharedPointer<Texture> > > _dilatedTextures;
@ -409,6 +409,18 @@ private:
int renderMeshes(gpu::Batch& batch, RenderMode mode, bool translucent, float alphaThreshold,
bool hasTangents, bool hasSpecular, bool isSkinned, RenderArgs* args = NULL);
void setupBatchTransform(gpu::Batch& batch);
QVector<int>* pickMeshList(bool translucent, float alphaThreshold, bool hasTangents, bool hasSpecular, bool isSkinned);

int renderMeshesFromList(QVector<int>& list, gpu::Batch& batch, RenderMode mode, bool translucent, float alphaThreshold,
RenderArgs* args, SkinLocations* skinLocations, GLenum specularTextureUnit);

static void pickPrograms(gpu::Batch& batch, RenderMode mode, bool translucent, float alphaThreshold,
bool hasTangents, bool hasSpecular, bool isSkinned, RenderArgs* args,
SkinLocations*& skinLocations, GLenum& specularTextureUnit);

static int renderMeshesForModelsInScene(gpu::Batch& batch, RenderMode mode, bool translucent, float alphaThreshold,
bool hasTangents, bool hasSpecular, bool isSkinned, RenderArgs* args);

};

@ -9,6 +9,7 @@
//

#include <limits>
#include <typeinfo>
#include <Application.h>
#include <Menu.h>

@ -360,3 +361,19 @@ bool Overlays::isLoaded(unsigned int id) {
return overlay->isLoaded();
}

float Overlays::textWidth(unsigned int id, const QString& text) const {
Overlay* thisOverlay = _overlays2D[id];
if (thisOverlay) {
if (typeid(*thisOverlay) == typeid(TextOverlay)) {
return static_cast<TextOverlay*>(thisOverlay)->textWidth(text);
}
} else {
thisOverlay = _overlays3D[id];
if (thisOverlay) {
if (typeid(*thisOverlay) == typeid(Text3DOverlay)) {
return static_cast<Text3DOverlay*>(thisOverlay)->textWidth(text);
}
}
}
return 0.0f;
}

@ -65,6 +65,10 @@ public slots:
/// returns whether the overlay's assets are loaded or not
bool isLoaded(unsigned int id);

/// returns the width of the given text in the specified overlay if it is a text overlay: in pixels if it is a 2D text
/// overlay; in meters if it is a 3D text overlay
float textWidth(unsigned int id, const QString& text) const;

private:
QMap<unsigned int, Overlay*> _overlays2D;
QMap<unsigned int, Overlay*> _overlays3D;

@ -17,6 +17,8 @@

const xColor DEFAULT_BACKGROUND_COLOR = { 0, 0, 0 };
const float DEFAULT_MARGIN = 0.1f;
const int FIXED_FONT_POINT_SIZE = 40;
const float LINE_SCALE_RATIO = 1.2f;

Text3DOverlay::Text3DOverlay() :
_backgroundColor(DEFAULT_BACKGROUND_COLOR),
@ -87,11 +89,10 @@ void Text3DOverlay::render(RenderArgs* args) {
glVertex3f(-halfDimensions.x, halfDimensions.y, SLIGHTLY_BEHIND);
glEnd();

const int FIXED_FONT_POINT_SIZE = 40;
const int FIXED_FONT_SCALING_RATIO = FIXED_FONT_POINT_SIZE * 40.0f; // this is a ratio determined through experimentation

// Same font properties as textWidth()
TextRenderer* textRenderer = TextRenderer::getInstance(SANS_FONT_FAMILY, FIXED_FONT_POINT_SIZE);
float LINE_SCALE_RATIO = 1.2f;
float maxHeight = (float)textRenderer->calculateHeight("Xy") * LINE_SCALE_RATIO;

float scaleFactor = (maxHeight / FIXED_FONT_SCALING_RATIO) * _lineHeight;
@ -179,4 +180,9 @@ void Text3DOverlay::setProperties(const QScriptValue& properties) {
}

float Text3DOverlay::textWidth(const QString& text) const {
QFont font(SANS_FONT_FAMILY, FIXED_FONT_POINT_SIZE); // Same font properties as render()
QFontMetrics fontMetrics(font);
float scaleFactor = _lineHeight * LINE_SCALE_RATIO / (float)FIXED_FONT_POINT_SIZE;
return scaleFactor * (float)fontMetrics.width(qPrintable(text));
}

@ -48,6 +48,8 @@ public:

virtual void setProperties(const QScriptValue& properties);

float textWidth(const QString& text) const; // Meters

private:
void enableClipPlane(GLenum plane, float x, float y, float z, float w);

@ -66,9 +66,8 @@ void TextOverlay::render(RenderArgs* args) {
glVertex2f(_bounds.left(), _bounds.bottom());
glEnd();

//TextRenderer(const char* family, int pointSize = -1, int weight = -1, bool italic = false,
//    EffectType effect = NO_EFFECT, int effectThickness = 1);
TextRenderer* textRenderer = TextRenderer::getInstance(SANS_FONT_FAMILY, _fontSize, 50);
// Same font properties as textWidth()
TextRenderer* textRenderer = TextRenderer::getInstance(SANS_FONT_FAMILY, _fontSize, DEFAULT_FONT_WEIGHT);

const int leftAdjust = -1; // required to make text render relative to left edge of bounds
const int topAdjust = -2; // required to make text render relative to top edge of bounds
@ -126,3 +125,8 @@ void TextOverlay::setProperties(const QScriptValue& properties) {
}

float TextOverlay::textWidth(const QString& text) const {
QFont font(SANS_FONT_FAMILY, _fontSize, DEFAULT_FONT_WEIGHT); // Same font properties as render()
QFontMetrics fontMetrics(font);
return fontMetrics.width(qPrintable(text));
}

@ -30,6 +30,7 @@
const xColor DEFAULT_BACKGROUND_COLOR = { 0, 0, 0 };
const int DEFAULT_MARGIN = 10;
const int DEFAULT_FONTSIZE = 11;
const int DEFAULT_FONT_WEIGHT = 50;

class TextOverlay : public Overlay2D {
Q_OBJECT
@ -53,6 +54,8 @@ public:

virtual void setProperties(const QScriptValue& properties);

float textWidth(const QString& text) const; // Pixels

private:

QString _text;

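Reviewer's note: a minimal script-side sketch of the new width query, assuming the interface is exposed to scripts as Overlays and that addOverlay accepts these properties as in the stock examples; the id, coordinates, and text below are placeholders, not part of this commit.

// Hypothetical usage sketch: measure a label before sizing its 2D overlay.
var labelId = Overlays.addOverlay("text", { x: 50, y: 50, width: 200, height: 30, text: "Hello" });
var widthInPixels = Overlays.textWidth(labelId, "Hello"); // pixels for a 2D text overlay
// the same call on a "text3d" overlay returns meters instead
print("label width: " + widthInPixels);
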
@ -31,7 +31,6 @@ void injectorFromScriptValue(const QScriptValue& object, AudioInjector*& out) {

AudioInjector::AudioInjector(QObject* parent) :
QObject(parent),
_sound(NULL),
_options(),
_shouldStop(false),
_loudness(0.0f),
@ -42,7 +41,7 @@ AudioInjector::AudioInjector(QObject* parent) :
}

AudioInjector::AudioInjector(Sound* sound, const AudioInjectorOptions& injectorOptions) :
_sound(sound),
_audioData(sound->getByteArray()),
_options(injectorOptions),
_shouldStop(false),
_loudness(0.0f),
@ -52,6 +51,18 @@ AudioInjector::AudioInjector(Sound* sound, const AudioInjectorOptions& injectorO
{
}

AudioInjector::AudioInjector(const QByteArray& audioData, const AudioInjectorOptions& injectorOptions) :
_audioData(audioData),
_options(injectorOptions),
_shouldStop(false),
_loudness(0.0f),
_isFinished(false),
_currentSendPosition(0),
_localBuffer(NULL)
{
}

AudioInjector::~AudioInjector() {
if (_localBuffer) {
_localBuffer->stop();
@ -76,11 +87,9 @@ void AudioInjector::injectAudio() {

void AudioInjector::injectLocally() {
bool success = false;
if (_localAudioInterface) {
const QByteArray& soundByteArray = _sound->getByteArray();

if (soundByteArray.size() > 0) {
_localBuffer = new AudioInjectorLocalBuffer(_sound->getByteArray(), this);
if (_localAudioInterface) {
if (_audioData.size() > 0) {
_localBuffer = new AudioInjectorLocalBuffer(_audioData, this);
_localBuffer->open(QIODevice::ReadOnly);
_localBuffer->setShouldLoop(_options.loop);

@ -114,15 +123,13 @@ void AudioInjector::injectLocally() {
const uchar MAX_INJECTOR_VOLUME = 0xFF;

void AudioInjector::injectToMixer() {
QByteArray soundByteArray = _sound->getByteArray();

if (_currentSendPosition < 0 ||
_currentSendPosition >= soundByteArray.size()) {
_currentSendPosition >= _audioData.size()) {
_currentSendPosition = 0;
}

// make sure we actually have samples downloaded to inject
if (soundByteArray.size()) {
if (_audioData.size()) {

// setup the packet for injected audio
QByteArray injectAudioPacket = byteArrayWithPopulatedHeader(PacketTypeInjectAudio);
@ -172,15 +179,15 @@ void AudioInjector::injectToMixer() {

// loop to send off our audio in NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL byte chunks
quint16 outgoingInjectedAudioSequenceNumber = 0;
while (_currentSendPosition < soundByteArray.size() && !_shouldStop) {
while (_currentSendPosition < _audioData.size() && !_shouldStop) {

int bytesToCopy = std::min(((_options.stereo) ? 2 : 1) * NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL,
soundByteArray.size() - _currentSendPosition);
_audioData.size() - _currentSendPosition);

// Measure the loudness of this frame
_loudness = 0.0f;
for (int i = 0; i < bytesToCopy; i += sizeof(int16_t)) {
_loudness += abs(*reinterpret_cast<int16_t*>(soundByteArray.data() + _currentSendPosition + i)) /
_loudness += abs(*reinterpret_cast<int16_t*>(_audioData.data() + _currentSendPosition + i)) /
(MAX_SAMPLE_VALUE / 2.0f);
}
_loudness /= (float)(bytesToCopy / sizeof(int16_t));
@ -203,7 +210,7 @@ void AudioInjector::injectToMixer() {

// copy the next NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL bytes to the packet
memcpy(injectAudioPacket.data() + numPreAudioDataBytes,
soundByteArray.data() + _currentSendPosition, bytesToCopy);
_audioData.data() + _currentSendPosition, bytesToCopy);

// grab our audio mixer from the NodeList, if it exists
NodeList* nodeList = NodeList::getInstance();
@ -217,7 +224,7 @@ void AudioInjector::injectToMixer() {

// send two packets before the first sleep so the mixer can start playback right away

if (_currentSendPosition != bytesToCopy && _currentSendPosition < soundByteArray.size()) {
if (_currentSendPosition != bytesToCopy && _currentSendPosition < _audioData.size()) {
// not the first packet and not done
// sleep for the appropriate time
int usecToSleep = (++nextFrame * BUFFER_SEND_INTERVAL_USECS) - timer.nsecsElapsed() / 1000;
@ -227,7 +234,7 @@ void AudioInjector::injectToMixer() {
}
}

if (shouldLoop && _currentSendPosition >= soundByteArray.size()) {
if (shouldLoop && _currentSendPosition >= _audioData.size()) {
_currentSendPosition = 0;
}
}

@ -29,6 +29,7 @@ class AudioInjector : public QObject {
public:
AudioInjector(QObject* parent);
AudioInjector(Sound* sound, const AudioInjectorOptions& injectorOptions);
AudioInjector(const QByteArray& audioData, const AudioInjectorOptions& injectorOptions);
~AudioInjector();

bool isFinished() const { return _isFinished; }
@ -51,7 +52,7 @@ private:
void injectToMixer();
void injectLocally();

Sound* _sound;
QByteArray _audioData;
AudioInjectorOptions _options;
bool _shouldStop;
float _loudness;

@ -43,28 +43,32 @@ void AudioScriptingInterface::stopAllInjectors() {
}

AudioInjector* AudioScriptingInterface::playSound(Sound* sound, const AudioInjectorOptions& injectorOptions) {

AudioInjector* injector = new AudioInjector(sound, injectorOptions);
injector->setLocalAudioInterface(_localAudioInterface);

QThread* injectorThread = new QThread();

injector->moveToThread(injectorThread);

// start injecting when the injector thread starts
connect(injectorThread, &QThread::started, injector, &AudioInjector::injectAudio);

// connect the right slots and signals so that the AudioInjector is killed once the injection is complete
connect(injector, &AudioInjector::finished, injector, &AudioInjector::deleteLater);
connect(injector, &AudioInjector::finished, injectorThread, &QThread::quit);
connect(injector, &AudioInjector::finished, this, &AudioScriptingInterface::injectorStopped);
connect(injectorThread, &QThread::finished, injectorThread, &QThread::deleteLater);

injectorThread->start();

_activeInjectors.append(QPointer<AudioInjector>(injector));

return injector;
if (sound) {
AudioInjector* injector = new AudioInjector(sound, injectorOptions);
injector->setLocalAudioInterface(_localAudioInterface);

QThread* injectorThread = new QThread();

injector->moveToThread(injectorThread);

// start injecting when the injector thread starts
connect(injectorThread, &QThread::started, injector, &AudioInjector::injectAudio);

// connect the right slots and signals so that the AudioInjector is killed once the injection is complete
connect(injector, &AudioInjector::finished, injector, &AudioInjector::deleteLater);
connect(injector, &AudioInjector::finished, injectorThread, &QThread::quit);
connect(injector, &AudioInjector::finished, this, &AudioScriptingInterface::injectorStopped);
connect(injectorThread, &QThread::finished, injectorThread, &QThread::deleteLater);

injectorThread->start();

_activeInjectors.append(QPointer<AudioInjector>(injector));

return injector;
} else {
qDebug() << "AudioScriptingInterface::playSound called with null Sound object.";
return NULL;
}
}

void AudioScriptingInterface::stopInjector(AudioInjector* injector) {

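Reviewer's note: a minimal script-side sketch of the call path the new null check protects, assuming this interface is registered with scripts under the name Audio as in the stock Interface setup; the URL and option values are placeholders.

// Hypothetical sketch: playSound() now returns null when handed a null Sound.
var ping = SoundCache.getSound("http://example.com/sounds/ping.raw");
var injector = Audio.playSound(ping, { position: MyAvatar.position, volume: 0.5 });
if (injector) {
    // keep the injector so it can be stopped later, e.g. via Audio.stopInjector(injector)
}
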
@ -29,96 +29,27 @@
#include "AudioEditBuffer.h"
#include "Sound.h"

QScriptValue soundToScriptValue(QScriptEngine* engine, Sound* const& in) {
return engine->newQObject(in);
QScriptValue soundToScriptValue(QScriptEngine* engine, SharedSoundPointer const& in) {
return engine->newQObject(in.data());
}

void soundFromScriptValue(const QScriptValue& object, Sound*& out) {
out = qobject_cast<Sound*>(object.toQObject());
void soundFromScriptValue(const QScriptValue &object, SharedSoundPointer &out) {
out = SharedSoundPointer(qobject_cast<Sound*>(object.toQObject()));
qDebug() << "Sound from script value" << out.data();
}

// procedural audio version of Sound
Sound::Sound(float volume, float frequency, float duration, float decay, QObject* parent) :
QObject(parent),
_isStereo(false)
{
static char monoAudioData[MAX_PACKET_SIZE];
static int16_t* monoAudioSamples = (int16_t*)(monoAudioData);

float t;
const float AUDIO_CALLBACK_MSECS = (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float)SAMPLE_RATE * 1000.0;
const float MAX_VOLUME = 32000.f;
const float MAX_DURATION = 2.f;
const float MIN_AUDIBLE_VOLUME = 0.001f;
const float NOISE_MAGNITUDE = 0.02f;
const int MAX_SAMPLE_VALUE = std::numeric_limits<int16_t>::max();
const int MIN_SAMPLE_VALUE = std::numeric_limits<int16_t>::min();
int numSamples = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; // we add sounds in chunks of this many samples

int chunkStartingSample = 0;
float waveFrequency = (frequency / SAMPLE_RATE) * TWO_PI;
while (volume > 0.f) {
for (int i = 0; i < numSamples; i++) {
t = (float)chunkStartingSample + (float)i;
float sample = sinf(t * waveFrequency);
sample += ((randFloat() - 0.5f) * NOISE_MAGNITUDE);
sample *= volume * MAX_VOLUME;

monoAudioSamples[i] = glm::clamp((int)sample, MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
volume *= (1.f - decay);
}
// add the monoAudioSamples to our actual output Byte Array
_byteArray.append(monoAudioData, numSamples * sizeof(int16_t));
chunkStartingSample += numSamples;
duration = glm::clamp(duration - (AUDIO_CALLBACK_MSECS / 1000.f), 0.f, MAX_DURATION);
//qDebug() << "decaying... _duration=" << _duration;
if (duration == 0.f || (volume < MIN_AUDIBLE_VOLUME)) {
volume = 0.f;
}
}
}

Sound::Sound(const QUrl& sampleURL, bool isStereo, QObject* parent) :
QObject(parent),
Sound::Sound(const QUrl& url, bool isStereo) :
Resource(url),
_isStereo(isStereo),
_hasDownloaded(false)
_isReady(false)
{
// assume we have a QApplication or QCoreApplication instance and use the
// QNetworkAccess manager to grab the raw audio file at the given URL

QNetworkAccessManager& networkAccessManager = NetworkAccessManager::getInstance();

qDebug() << "Requesting audio file" << sampleURL.toDisplayString();

QNetworkReply* soundDownload = networkAccessManager.get(QNetworkRequest(sampleURL));
connect(soundDownload, &QNetworkReply::finished, this, &Sound::replyFinished);
connect(soundDownload, SIGNAL(error(QNetworkReply::NetworkError)),
this, SLOT(replyError(QNetworkReply::NetworkError)));
}

Sound::Sound(const QByteArray byteArray, QObject* parent) :
QObject(parent),
_byteArray(byteArray),
_isStereo(false),
_hasDownloaded(true)
{
}

void Sound::append(const QByteArray byteArray) {
_byteArray.append(byteArray);
}

void Sound::replyFinished() {

QNetworkReply* reply = reinterpret_cast<QNetworkReply*>(sender());

void Sound::downloadFinished(QNetworkReply* reply) {
// replace our byte array with the downloaded data
QByteArray rawAudioByteArray = reply->readAll();

// foreach(QByteArray b, reply->rawHeaderList())
// qDebug() << b.constData() << ": " << reply->rawHeader(b).constData();

if (reply->hasRawHeader("Content-Type")) {

QByteArray headerContentType = reply->rawHeader("Content-Type");
@ -141,12 +72,7 @@ void Sound::replyFinished() {
qDebug() << "Network reply without 'Content-Type'.";
}

_hasDownloaded = true;
}

void Sound::replyError(QNetworkReply::NetworkError code) {
QNetworkReply* reply = reinterpret_cast<QNetworkReply*>(sender());
qDebug() << "Error downloading sound file at" << reply->url().toString() << "-" << reply->errorString();
_isReady = true;
}

void Sound::downSample(const QByteArray& rawAudioByteArray) {

@ -16,38 +16,37 @@
#include <QtNetwork/QNetworkReply>
#include <QtScript/qscriptengine.h>

class Sound : public QObject {
#include <ResourceCache.h>

class Sound : public Resource {
Q_OBJECT

Q_PROPERTY(bool downloaded READ hasDownloaded)
Q_PROPERTY(bool downloaded READ isReady)
public:
Sound(const QUrl& sampleURL, bool isStereo = false, QObject* parent = NULL);
Sound(float volume, float frequency, float duration, float decay, QObject* parent = NULL);
Sound(const QByteArray byteArray, QObject* parent = NULL);
void append(const QByteArray byteArray);
Sound(const QUrl& url, bool isStereo = false);

bool isStereo() const { return _isStereo; }
bool hasDownloaded() const { return _hasDownloaded; }

bool isReady() const { return _isReady; }

const QByteArray& getByteArray() { return _byteArray; }

private:
QByteArray _byteArray;
bool _isStereo;
bool _hasDownloaded;
bool _isReady;

void trimFrames();
void downSample(const QByteArray& rawAudioByteArray);
void interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray);

private slots:
void replyFinished();
void replyError(QNetworkReply::NetworkError code);

virtual void downloadFinished(QNetworkReply* reply);
};

Q_DECLARE_METATYPE(Sound*)
typedef QSharedPointer<Sound> SharedSoundPointer;

QScriptValue soundToScriptValue(QScriptEngine* engine, Sound* const& in);
void soundFromScriptValue(const QScriptValue& object, Sound*& out);
Q_DECLARE_METATYPE(SharedSoundPointer)

QScriptValue soundToScriptValue(QScriptEngine* engine, SharedSoundPointer const& in);
void soundFromScriptValue(const QScriptValue& object, SharedSoundPointer& out);

#endif // hifi_Sound_h

libraries/audio/src/SoundCache.cpp (new file, 43 lines)
@ -0,0 +1,43 @@
//
// SoundCache.cpp
// libraries/audio/src
//
// Created by Stephen Birarda on 2014-11-13.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include <qthread.h>

#include "SoundCache.h"

static int soundPointerMetaTypeId = qRegisterMetaType<SharedSoundPointer>();

SoundCache& SoundCache::getInstance() {
static SoundCache staticInstance;
return staticInstance;
}

SoundCache::SoundCache(QObject* parent) :
ResourceCache(parent)
{
}

SharedSoundPointer SoundCache::getSound(const QUrl& url) {
if (QThread::currentThread() != thread()) {
SharedSoundPointer result;
QMetaObject::invokeMethod(this, "getSound", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(SharedSoundPointer, result), Q_ARG(const QUrl&, url));
return result;
}
return getResource(url).staticCast<Sound>();
}

QSharedPointer<Resource> SoundCache::createResource(const QUrl& url, const QSharedPointer<Resource>& fallback,
bool delayLoad, const void* extra) {
qDebug() << "Requesting sound at" << url.toString();
return QSharedPointer<Resource>(new Sound(url), &Resource::allReferencesCleared);
}
libraries/audio/src/SoundCache.h (new file, 34 lines)
@ -0,0 +1,34 @@
//
// SoundCache.h
// libraries/audio/src
//
// Created by Stephen Birarda on 2014-11-13.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_SoundCache_h
#define hifi_SoundCache_h

#include <ResourceCache.h>

#include "Sound.h"

/// Scriptable interface for sound loading.
class SoundCache : public ResourceCache {
    Q_OBJECT
public:
static SoundCache& getInstance();

Q_INVOKABLE SharedSoundPointer getSound(const QUrl& url);

protected:
virtual QSharedPointer<Resource> createResource(const QUrl& url,
const QSharedPointer<Resource>& fallback, bool delayLoad, const void* extra);
private:
SoundCache(QObject* parent = NULL);
};

#endif // hifi_SoundCache_h

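Reviewer's note: a short usage sketch of the new cache from a script, assuming the cache is exposed to script engines under the name SoundCache and that Script.setTimeout is available as in the stock script API; the URL and delay are placeholders.

// Hypothetical sketch: Sound is now a cached Resource, so repeated requests for the same URL share one download.
var ding = SoundCache.getSound("http://example.com/sounds/ding.wav");
Script.setTimeout(function() {
    print("ding downloaded: " + ding.downloaded); // the "downloaded" property is now backed by Sound::isReady()
}, 1000);
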
@ -168,7 +168,7 @@ void Player::setupAudioThread() {
_audioThread = new QThread();
_options.position = _avatar->getPosition();
_options.orientation = _avatar->getOrientation();
_injector.reset(new AudioInjector(_recording->getAudio(), _options), &QObject::deleteLater);
_injector.reset(new AudioInjector(_recording->getAudioData(), _options), &QObject::deleteLater);
_injector->moveToThread(_audioThread);
_audioThread->start();
QMetaObject::invokeMethod(_injector.data(), "injectAudio", Qt::QueuedConnection);

@ -43,13 +43,6 @@ void RecordingFrame::setBlendshapeCoefficients(QVector<float> blendshapeCoeffici
_blendshapeCoefficients = blendshapeCoefficients;
}

Recording::Recording() : _audio(NULL) {
}

Recording::~Recording() {
delete _audio;
}

int Recording::getLength() const {
if (_timestamps.isEmpty()) {
return 0;
@ -77,19 +70,10 @@ void Recording::addFrame(int timestamp, RecordingFrame &frame) {
_frames << frame;
}

void Recording::addAudioPacket(const QByteArray& byteArray) {
if (!_audio) {
_audio = new Sound(byteArray);
return;
}
_audio->append(byteArray);
}

void Recording::clear() {
_timestamps.clear();
_frames.clear();
delete _audio;
_audio = NULL;
_audioData.clear();
}

void writeVec3(QDataStream& stream, const glm::vec3& value) {
@ -324,7 +308,7 @@ void writeRecordingToFile(RecordingPointer recording, const QString& filename) {
fileStream << buffer;
}

fileStream << recording->_audio->getByteArray();
fileStream << recording->getAudioData();

qint64 writingTime = timer.restart();
// Write data length and CRC-16
@ -367,7 +351,7 @@ void writeRecordingToFile(RecordingPointer recording, const QString& filename) {

qDebug() << "Recording:";
qDebug() << "Total frames:" << recording->getFrameNumber();
qDebug() << "Audio array:" << recording->getAudio()->getByteArray().size();
qDebug() << "Audio array:" << recording->getAudioData().size();
}

qint64 checksumTime = timer.elapsed();
@ -642,7 +626,7 @@ RecordingPointer readRecordingFromFile(RecordingPointer recording, const QString

qDebug() << "Recording:";
qDebug() << "Total frames:" << recording->getFrameNumber();
qDebug() << "Audio array:" << recording->getAudio()->getByteArray().size();
qDebug() << "Audio array:" << recording->getAudioData().size();

}

@ -48,9 +48,6 @@ public:
/// Stores a recording
class Recording {
public:
Recording();
~Recording();

bool isEmpty() const { return _timestamps.isEmpty(); }
int getLength() const; // in ms

@ -58,11 +55,11 @@ public:
int getFrameNumber() const { return _frames.size(); }
qint32 getFrameTimestamp(int i) const;
const RecordingFrame& getFrame(int i) const;
Sound* getAudio() const { return _audio; }
const QByteArray& getAudioData() const { return _audioData; }

protected:
void addFrame(int timestamp, RecordingFrame& frame);
void addAudioPacket(const QByteArray& byteArray);
void addAudioPacket(const QByteArray& byteArray) { _audioData.append(byteArray); }
void clear();

private:
@ -70,7 +67,7 @@ private:
QVector<qint32> _timestamps;
QVector<RecordingFrame> _frames;

Sound* _audio;
QByteArray _audioData;

friend class Recorder;
friend class Player;

@ -22,7 +22,9 @@

ResourceCache::ResourceCache(QObject* parent) :
QObject(parent),
_lastLRUKey(0) {
_lastLRUKey(0)
{
}

ResourceCache::~ResourceCache() {
@ -291,7 +293,7 @@ void Resource::makeRequest() {
connect(_reply, SIGNAL(downloadProgress(qint64,qint64)), SLOT(handleDownloadProgress(qint64,qint64)));
connect(_reply, SIGNAL(error(QNetworkReply::NetworkError)), SLOT(handleReplyError()));
connect(_reply, SIGNAL(finished()), SLOT(handleReplyFinished()));

_replyTimer = new QTimer(this);
connect(_replyTimer, SIGNAL(timeout()), SLOT(handleReplyTimeout()));
_replyTimer->setSingleShot(true);

@ -27,7 +27,6 @@
#include <NetworkAccessManager.h>
#include <NodeList.h>
#include <PacketHeaders.h>
#include <Sound.h>
#include <UUID.h>
#include <VoxelConstants.h>
#include <VoxelDetail.h>
@ -47,14 +46,6 @@
VoxelsScriptingInterface ScriptEngine::_voxelsScriptingInterface;
EntityScriptingInterface ScriptEngine::_entityScriptingInterface;

static QScriptValue soundConstructor(QScriptContext* context, QScriptEngine* engine) {
QUrl soundURL = QUrl(context->argument(0).toString());
bool isStereo = context->argument(1).toBool();
QScriptValue soundScriptValue = engine->newQObject(new Sound(soundURL, isStereo), QScriptEngine::ScriptOwnership);

return soundScriptValue;
}

static QScriptValue debugPrint(QScriptContext* context, QScriptEngine* engine){
qDebug() << "script:print()<<" << context->argument(0).toString();
QString message = context->argument(0).toString()
@ -263,10 +254,6 @@ void ScriptEngine::init() {
QScriptValue printConstructorValue = newFunction(debugPrint);
globalObject().setProperty("print", printConstructorValue);

QScriptValue soundConstructorValue = newFunction(soundConstructor);
QScriptValue soundMetaObject = newQMetaObject(&Sound::staticMetaObject, soundConstructorValue);
globalObject().setProperty("Sound", soundMetaObject);

QScriptValue localVoxelsValue = scriptValueFromQMetaObject<LocalVoxels>();
globalObject().setProperty("LocalVoxels", localVoxelsValue);

@ -119,8 +119,6 @@ QString LogHandler::printMessage(LogMsgType type, const QMessageLogContext& cont
char dateString[100];
strftime(dateString, sizeof(dateString), DATE_STRING_FORMAT, localTime);

prefixString.append(QString(" [%1]").arg(dateString));

if (_shouldOutputPID) {
prefixString.append(QString(" [%1").arg(getpid()));

@ -20,6 +20,8 @@

#include <bitset>

#include <memory>

class Transform {
public:
typedef glm::mat4 Mat4;
@ -30,16 +32,16 @@ public:
typedef glm::quat Quat;

Transform() :
_translation(0),
_rotation(1.0f, 0, 0, 0),
_scale(1.0f),
_translation(0),
_flags(FLAG_CACHE_INVALID_BITSET) // invalid cache
{
}
Transform(const Transform& transform) :
_translation(transform._translation),
_rotation(transform._rotation),
_scale(transform._scale),
_translation(transform._translation),
_flags(transform._flags)
{
invalidCache();
@ -49,6 +51,15 @@ public:
}
~Transform() {}

Transform& operator=(const Transform& transform) {
_rotation = transform._rotation;
_scale = transform._scale;
_translation = transform._translation;
_flags = transform._flags;
invalidCache();
return (*this);
}

void setIdentity();

const Vec3& getTranslation() const;
@ -89,7 +100,6 @@ public:
// Left will be inversed before the multiplication
static Transform& inverseMult(Transform& result, const Transform& left, const Transform& right);

protected:

enum Flag {
@ -111,14 +121,15 @@

// TRS
Vec3 _translation;
Quat _rotation;
Vec3 _scale;
Vec3 _translation;

mutable Flags _flags;

// Cached transform
mutable Mat4 _matrix;
// TODO: replace this auto ptr by a "unique ptr" as soon as we are compiling in C++11
mutable std::auto_ptr<Mat4> _matrix;

bool isCacheInvalid() const { return _flags[FLAG_CACHE_INVALID]; }
void validCache() const { _flags.set(FLAG_CACHE_INVALID, false); }
@ -135,6 +146,7 @@ protected:
void flagNonUniform() { _flags.set(FLAG_NON_UNIFORM, true); }

void updateCache() const;
Mat4& getCachedMatrix(Mat4& result) const;
};

inline void Transform::setIdentity() {
@ -271,8 +283,25 @@ inline void Transform::postScale(const Vec3& scale) {
}

inline Transform::Mat4& Transform::getMatrix(Transform::Mat4& result) const {
updateCache();
result = _matrix;
if (isRotating()) {
Mat3 rot = glm::mat3_cast(_rotation);

if (isScaling()) {
rot[0] *= _scale.x;
rot[1] *= _scale.y;
rot[2] *= _scale.z;
}

result[0] = Vec4(rot[0], 0.f);
result[1] = Vec4(rot[1], 0.f);
result[2] = Vec4(rot[2], 0.f);
} else {
result[0] = Vec4(_scale.x, 0.f, 0.f, 0.f);
result[1] = Vec4(0.f, _scale.y, 0.f, 0.f);
result[2] = Vec4(0.f, 0.f, _scale.z, 0.f);
}

result[3] = Vec4(_translation, 1.0f);
return result;
}

@ -369,27 +398,18 @@ inline Transform& Transform::inverseMult( Transform& result, const Transform& le
return result;
}

inline Transform::Mat4& Transform::getCachedMatrix(Transform::Mat4& result) const {
updateCache();
result = (*_matrix);
return result;
}

inline void Transform::updateCache() const {
if (isCacheInvalid()) {
if (isRotating()) {
Mat3 rot = glm::mat3_cast(_rotation);

if (isScaling()) {
rot[0] *= _scale.x;
rot[1] *= _scale.y;
rot[2] *= _scale.z;
}

_matrix[0] = Vec4(rot[0], 0.f);
_matrix[1] = Vec4(rot[1], 0.f);
_matrix[2] = Vec4(rot[2], 0.f);
} else {
_matrix[0] = Vec4(_scale.x, 0.f, 0.f, 0.f);
_matrix[1] = Vec4(0.f, _scale.y, 0.f, 0.f);
_matrix[2] = Vec4(0.f, 0.f, _scale.z, 0.f);
if (!_matrix.get()) {
_matrix.reset(new Mat4());
}

_matrix[3] = Vec4(_translation, 1.0f);
getMatrix((*_matrix));
validCache();
}
}