changed downstream re-sampling to occur right after parsing packet
parent 5a51950771
commit 4d7d6f1e25
12 changed files with 993 additions and 48 deletions
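In short: the client stops pushing mixed audio into QAudioOutput after the fact (pushAudioToOutput() is commented out below) and instead resamples inside a processSamples callback fired directly from the stream's packet-parse path, while playback moves to Qt's pull model, with QAudioOutput draining the new Audio::AudioOutputIODevice. A minimal sketch of that pull pattern, with illustrative names rather than the project's code:

    #include <QAudioOutput>
    #include <QIODevice>
    #include <cstring>

    // QAudioOutput calls readData() on its own schedule whenever the device
    // needs more samples; the app never writes into the output directly.
    class PullDevice : public QIODevice {
    public:
        qint64 readData(char* data, qint64 maxSize) {
            memset(data, 0, maxSize); // real code: copy decoded samples here,
            return maxSize;           // zero-filling on underrun so playback never blocks
        }
        qint64 writeData(const char*, qint64) { return 0; } // output-only device
    };

    // usage: device.open(QIODevice::ReadOnly); audioOutput->start(&device);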
@@ -104,7 +104,7 @@ void AudioMixerClientData::checkBuffersBeforeFrameSend(AABox* checkSourceZone, A
     QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
     for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
         PositionalAudioStream* stream = i.value();
-        if (stream->popFrames(1)) {
+        if (stream->popFrames(1, true) > 0) {
            // this is a ring buffer that is ready to go

            // calculate the trailing avg loudness for the next frame
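Call-site note: popFrames() previously returned a bool; it now returns the number of frames actually popped, so the readiness check becomes an explicit comparison, and the second argument requests an all-or-nothing pop. Paraphrased, not project code:

    // before: bool popFrames(int numFrames, bool starveOnFail = true);
    // after:  int  popFrames(int maxFrames, bool allOrNothing, bool starveOnFail = true);
    bool readyToMix = stream->popFrames(1, true) > 0; // pop exactly one frame, or nothing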
examples/bot_procedural.js (new file, +674 lines)
@@ -0,0 +1,674 @@
//
// bot_procedural.js
// hifi
//
// Created by Ben Arnold on 7/29/2013
//
// Copyright (c) 2014 HighFidelity, Inc. All rights reserved.
//
// This is an example script that demonstrates an NPC avatar.
//
//

//For procedural walk animation
Script.include("http://s3-us-west-1.amazonaws.com/highfidelity-public/scripts/proceduralAnimationAPI.js");

var procAnimAPI = new ProcAnimAPI();

function getRandomFloat(min, max) {
    return Math.random() * (max - min) + min;
}

function getRandomInt (min, max) {
    return Math.floor(Math.random() * (max - min + 1)) + min;
}

function printVector(string, vector) {
    print(string + " " + vector.x + ", " + vector.y + ", " + vector.z);
}

var CHANCE_OF_MOVING = 0.005;
var CHANCE_OF_SOUND = 0.005;
var CHANCE_OF_HEAD_TURNING = 0.01;
var CHANCE_OF_BIG_MOVE = 1.0;

var isMoving = false;
var isTurningHead = false;
var isPlayingAudio = false;

var X_MIN = 0.50;
var X_MAX = 15.60;
var Z_MIN = 0.50;
var Z_MAX = 15.10;
var Y_FEET = 0.0;
var AVATAR_PELVIS_HEIGHT = 0.84;
var Y_PELVIS = Y_FEET + AVATAR_PELVIS_HEIGHT;
var MAX_PELVIS_DELTA = 2.5;

var MOVE_RANGE_SMALL = 3.0;
var MOVE_RANGE_BIG = 10.0;
var TURN_RANGE = 70.0;
var STOP_TOLERANCE = 0.05;
var MOVE_RATE = 0.05;
var TURN_RATE = 0.2;
var HEAD_TURN_RATE = 0.05;
var PITCH_RANGE = 15.0;
var YAW_RANGE = 35.0;

var firstPosition = { x: getRandomFloat(X_MIN, X_MAX), y: Y_PELVIS, z: getRandomFloat(Z_MIN, Z_MAX) };
var targetPosition = { x: 0, y: 0, z: 0 };
var targetOrientation = { x: 0, y: 0, z: 0, w: 0 };
var currentOrientation = { x: 0, y: 0, z: 0, w: 0 };
var targetHeadPitch = 0.0;
var targetHeadYaw = 0.0;

var basePelvisHeight = 0.0;
var pelvisOscillatorPosition = 0.0;
var pelvisOscillatorVelocity = 0.0;

function clamp(val, min, max){
    return Math.max(min, Math.min(max, val))
}

//Array of all valid bot numbers
var validBotNumbers = [];

// right now we only use bot 63, since many other bots have messed up skeletons and LOD issues
var botNumber = 63;//getRandomInt(0, 99);

var newFaceFilePrefix = "ron";

var newBodyFilePrefix = "bot" + botNumber;

// set the face model fst using the bot number
// there is no need to change the body model - we're using the default
Avatar.faceModelURL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/meshes/" + newFaceFilePrefix + ".fst";
Avatar.skeletonModelURL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/meshes/" + newBodyFilePrefix + "_a.fst";
Avatar.billboardURL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/meshes/billboards/bot" + botNumber + ".png";

Agent.isAvatar = true;
Agent.isListeningToAudioStream = true;

// change the avatar's position to the random one
Avatar.position = firstPosition;
basePelvisHeight = firstPosition.y;
printVector("New dancer, position = ", Avatar.position);

function loadSounds() {
    var sound_filenames = ["AB1.raw", "Anchorman2.raw", "B1.raw", "B1.raw", "Bale1.raw", "Bandcamp.raw",
        "Big1.raw", "Big2.raw", "Brian1.raw", "Buster1.raw", "CES1.raw", "CES2.raw", "CES3.raw", "CES4.raw",
        "Carrie1.raw", "Carrie3.raw", "Charlotte1.raw", "EN1.raw", "EN2.raw", "EN3.raw", "Eugene1.raw", "Francesco1.raw",
        "Italian1.raw", "Japanese1.raw", "Leigh1.raw", "Lucille1.raw", "Lucille2.raw", "MeanGirls.raw", "Murray2.raw",
        "Nigel1.raw", "PennyLane.raw", "Pitt1.raw", "Ricardo.raw", "SN.raw", "Sake1.raw", "Samantha1.raw", "Samantha2.raw",
        "Spicoli1.raw", "Supernatural.raw", "Swearengen1.raw", "TheDude.raw", "Tony.raw", "Triumph1.raw", "Uma1.raw",
        "Walken1.raw", "Walken2.raw", "Z1.raw", "Z2.raw"
    ];

    var footstep_filenames = ["FootstepW2Left-12db.wav", "FootstepW2Right-12db.wav", "FootstepW3Left-12db.wav", "FootstepW3Right-12db.wav",
        "FootstepW5Left-12db.wav", "FootstepW5Right-12db.wav"];

    var SOUND_BASE_URL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/Cocktail+Party+Snippets/Raws/";

    var FOOTSTEP_BASE_URL = "http://highfidelity-public.s3-us-west-1.amazonaws.com/sounds/Footsteps/";

    for (var i = 0; i < sound_filenames.length; i++) {
        sounds.push(new Sound(SOUND_BASE_URL + sound_filenames[i]));
    }

    for (var i = 0; i < footstep_filenames.length; i++) {
        footstepSounds.push(new Sound(FOOTSTEP_BASE_URL + footstep_filenames[i]));
    }
}

var sounds = [];
var footstepSounds = [];
loadSounds();


function playRandomSound() {
    if (!Agent.isPlayingAvatarSound) {
        var whichSound = Math.floor((Math.random() * sounds.length));
        Agent.playAvatarSound(sounds[whichSound]);
    }
}

function playRandomFootstepSound() {

    var whichSound = Math.floor((Math.random() * footstepSounds.length));
    var options = new AudioInjectionOptions();
    options.position = Avatar.position;
    options.volume = 1.0;
    Audio.playSound(footstepSounds[whichSound], options);

}

// ************************************ Facial Animation **********************************
var allBlendShapes = [];
var targetBlendCoefficient = [];
var currentBlendCoefficient = [];

//Blendshape constructor
function addBlendshapeToPose(pose, shapeIndex, val) {
    var index = pose.blendShapes.length;
    pose.blendShapes[index] = {shapeIndex: shapeIndex, val: val };
}
//The mood of the avatar, determines face. 0 = happy, 1 = angry, 2 = sad.

//Randomly pick avatar mood. 80% happy, 10% mad 10% sad
var randMood = Math.floor(Math.random() * 11);
var avatarMood;
if (randMood == 0) {
    avatarMood = 1;
} else if (randMood == 2) {
    avatarMood = 2;
} else {
    avatarMood = 0;
}

var currentExpression = -1;
//Face pose constructor
var happyPoses = [];

happyPoses[0] = {blendShapes: []};
addBlendshapeToPose(happyPoses[0], 28, 0.7); //MouthSmile_L
addBlendshapeToPose(happyPoses[0], 29, 0.7); //MouthSmile_R

happyPoses[1] = {blendShapes: []};
addBlendshapeToPose(happyPoses[1], 28, 1.0); //MouthSmile_L
addBlendshapeToPose(happyPoses[1], 29, 1.0); //MouthSmile_R
addBlendshapeToPose(happyPoses[1], 21, 0.2); //JawOpen

happyPoses[2] = {blendShapes: []};
addBlendshapeToPose(happyPoses[2], 28, 1.0); //MouthSmile_L
addBlendshapeToPose(happyPoses[2], 29, 1.0); //MouthSmile_R
addBlendshapeToPose(happyPoses[2], 21, 0.5); //JawOpen
addBlendshapeToPose(happyPoses[2], 46, 1.0); //CheekSquint_L
addBlendshapeToPose(happyPoses[2], 47, 1.0); //CheekSquint_R
addBlendshapeToPose(happyPoses[2], 17, 1.0); //BrowsU_L
addBlendshapeToPose(happyPoses[2], 18, 1.0); //BrowsU_R

var angryPoses = [];

angryPoses[0] = {blendShapes: []};
addBlendshapeToPose(angryPoses[0], 26, 0.6); //MouthFrown_L
addBlendshapeToPose(angryPoses[0], 27, 0.6); //MouthFrown_R
addBlendshapeToPose(angryPoses[0], 14, 0.6); //BrowsD_L
addBlendshapeToPose(angryPoses[0], 15, 0.6); //BrowsD_R

angryPoses[1] = {blendShapes: []};
addBlendshapeToPose(angryPoses[1], 26, 0.9); //MouthFrown_L
addBlendshapeToPose(angryPoses[1], 27, 0.9); //MouthFrown_R
addBlendshapeToPose(angryPoses[1], 14, 0.9); //BrowsD_L
addBlendshapeToPose(angryPoses[1], 15, 0.9); //BrowsD_R

angryPoses[2] = {blendShapes: []};
addBlendshapeToPose(angryPoses[2], 26, 1.0); //MouthFrown_L
addBlendshapeToPose(angryPoses[2], 27, 1.0); //MouthFrown_R
addBlendshapeToPose(angryPoses[2], 14, 1.0); //BrowsD_L
addBlendshapeToPose(angryPoses[2], 15, 1.0); //BrowsD_R
addBlendshapeToPose(angryPoses[2], 21, 0.5); //JawOpen
addBlendshapeToPose(angryPoses[2], 46, 1.0); //CheekSquint_L
addBlendshapeToPose(angryPoses[2], 47, 1.0); //CheekSquint_R

var sadPoses = [];

sadPoses[0] = {blendShapes: []};
addBlendshapeToPose(sadPoses[0], 26, 0.6); //MouthFrown_L
addBlendshapeToPose(sadPoses[0], 27, 0.6); //MouthFrown_R
addBlendshapeToPose(sadPoses[0], 16, 0.2); //BrowsU_C
addBlendshapeToPose(sadPoses[0], 2, 0.6); //EyeSquint_L
addBlendshapeToPose(sadPoses[0], 3, 0.6); //EyeSquint_R

sadPoses[1] = {blendShapes: []};
addBlendshapeToPose(sadPoses[1], 26, 0.9); //MouthFrown_L
addBlendshapeToPose(sadPoses[1], 27, 0.9); //MouthFrown_R
addBlendshapeToPose(sadPoses[1], 16, 0.6); //BrowsU_C
addBlendshapeToPose(sadPoses[1], 2, 0.9); //EyeSquint_L
addBlendshapeToPose(sadPoses[1], 3, 0.9); //EyeSquint_R

sadPoses[2] = {blendShapes: []};
addBlendshapeToPose(sadPoses[2], 26, 1.0); //MouthFrown_L
addBlendshapeToPose(sadPoses[2], 27, 1.0); //MouthFrown_R
addBlendshapeToPose(sadPoses[2], 16, 0.1); //BrowsU_C
addBlendshapeToPose(sadPoses[2], 2, 1.0); //EyeSquint_L
addBlendshapeToPose(sadPoses[2], 3, 1.0); //EyeSquint_R
addBlendshapeToPose(sadPoses[2], 21, 0.3); //JawOpen

var facePoses = [];
facePoses[0] = happyPoses;
facePoses[1] = angryPoses;
facePoses[2] = sadPoses;


function addBlendShape(s) {
    allBlendShapes[allBlendShapes.length] = s;
}

//It is imperative that the following blendshapes are all present and are in the correct order
addBlendShape("EyeBlink_L"); //0
addBlendShape("EyeBlink_R"); //1
addBlendShape("EyeSquint_L"); //2
addBlendShape("EyeSquint_R"); //3
addBlendShape("EyeDown_L"); //4
addBlendShape("EyeDown_R"); //5
addBlendShape("EyeIn_L"); //6
addBlendShape("EyeIn_R"); //7
addBlendShape("EyeOpen_L"); //8
addBlendShape("EyeOpen_R"); //9
addBlendShape("EyeOut_L"); //10
addBlendShape("EyeOut_R"); //11
addBlendShape("EyeUp_L"); //12
addBlendShape("EyeUp_R"); //13
addBlendShape("BrowsD_L"); //14
addBlendShape("BrowsD_R"); //15
addBlendShape("BrowsU_C"); //16
addBlendShape("BrowsU_L"); //17
addBlendShape("BrowsU_R"); //18
addBlendShape("JawFwd"); //19
addBlendShape("JawLeft"); //20
addBlendShape("JawOpen"); //21
addBlendShape("JawChew"); //22
addBlendShape("JawRight"); //23
addBlendShape("MouthLeft"); //24
addBlendShape("MouthRight"); //25
addBlendShape("MouthFrown_L"); //26
addBlendShape("MouthFrown_R"); //27
addBlendShape("MouthSmile_L"); //28
addBlendShape("MouthSmile_R"); //29
addBlendShape("MouthDimple_L"); //30
addBlendShape("MouthDimple_R"); //31
addBlendShape("LipsStretch_L"); //32
addBlendShape("LipsStretch_R"); //33
addBlendShape("LipsUpperClose"); //34
addBlendShape("LipsLowerClose"); //35
addBlendShape("LipsUpperUp"); //36
addBlendShape("LipsLowerDown"); //37
addBlendShape("LipsUpperOpen"); //38
addBlendShape("LipsLowerOpen"); //39
addBlendShape("LipsFunnel"); //40
addBlendShape("LipsPucker"); //41
addBlendShape("ChinLowerRaise"); //42
addBlendShape("ChinUpperRaise"); //43
addBlendShape("Sneer"); //44
addBlendShape("Puff"); //45
addBlendShape("CheekSquint_L"); //46
addBlendShape("CheekSquint_R"); //47

for (var i = 0; i < allBlendShapes.length; i++) {
    targetBlendCoefficient[i] = 0;
    currentBlendCoefficient[i] = 0;
}

function setRandomExpression() {

    //Clear all expression data for current expression
    if (currentExpression != -1) {
        var expression = facePoses[avatarMood][currentExpression];
        for (var i = 0; i < expression.blendShapes.length; i++) {
            targetBlendCoefficient[expression.blendShapes[i].shapeIndex] = 0.0;
        }
    }
    //Get a new current expression
    currentExpression = Math.floor(Math.random() * facePoses[avatarMood].length);
    var expression = facePoses[avatarMood][currentExpression];
    for (var i = 0; i < expression.blendShapes.length; i++) {
        targetBlendCoefficient[expression.blendShapes[i].shapeIndex] = expression.blendShapes[i].val;
    }
}

var expressionChangeSpeed = 0.1;
function updateBlendShapes(deltaTime) {

    for (var i = 0; i < allBlendShapes.length; i++) {
        currentBlendCoefficient[i] += (targetBlendCoefficient[i] - currentBlendCoefficient[i]) * expressionChangeSpeed;
        Avatar.setBlendshape(allBlendShapes[i], currentBlendCoefficient[i]);
    }
}

var BLINK_SPEED = 0.15;
var CHANCE_TO_BLINK = 0.0025;
var MAX_BLINK = 0.85;
var blink = 0.0;
var isBlinking = false;
function updateBlinking(deltaTime) {
    if (isBlinking == false) {
        if (Math.random() < CHANCE_TO_BLINK) {
            isBlinking = true;
        } else {
            blink -= BLINK_SPEED;
            if (blink < 0.0) blink = 0.0;
        }
    } else {
        blink += BLINK_SPEED;
        if (blink > MAX_BLINK) {
            blink = MAX_BLINK;
            isBlinking = false;
        }
    }

    currentBlendCoefficient[0] = blink;
    currentBlendCoefficient[1] = blink;
    targetBlendCoefficient[0] = blink;
    targetBlendCoefficient[1] = blink;
}

// *************************************************************************************

//Procedural walk animation using two keyframes
//We use a separate array for front and back joints
//Pitch, yaw, and roll for the joints
var rightAngles = [];
var leftAngles = [];
//for non mirrored joints such as the spine
var middleAngles = [];

//Actual joint mappings
var SHOULDER_JOINT_NUMBER = 15;
var ELBOW_JOINT_NUMBER = 16;
var JOINT_R_HIP = 1;
var JOINT_R_KNEE = 2;
var JOINT_L_HIP = 6;
var JOINT_L_KNEE = 7;
var JOINT_R_ARM = 15;
var JOINT_R_FOREARM = 16;
var JOINT_L_ARM = 39;
var JOINT_L_FOREARM = 40;
var JOINT_SPINE = 11;
var JOINT_R_FOOT = 3;
var JOINT_L_FOOT = 8;
var JOINT_R_TOE = 4;
var JOINT_L_TOE = 9;

// ******************************* Animation Is Defined Below *************************************

var NUM_FRAMES = 2;
for (var i = 0; i < NUM_FRAMES; i++) {
    rightAngles[i] = [];
    leftAngles[i] = [];
    middleAngles[i] = [];
}
//Joint order for actual joint mappings, should be interleaved R,L,R,L,...S,S,S for R = right, L = left, S = single
var JOINT_ORDER = [];
//*** right / left joints ***
var HIP = 0;
JOINT_ORDER.push(JOINT_R_HIP);
JOINT_ORDER.push(JOINT_L_HIP);
var KNEE = 1;
JOINT_ORDER.push(JOINT_R_KNEE);
JOINT_ORDER.push(JOINT_L_KNEE);
var ARM = 2;
JOINT_ORDER.push(JOINT_R_ARM);
JOINT_ORDER.push(JOINT_L_ARM);
var FOREARM = 3;
JOINT_ORDER.push(JOINT_R_FOREARM);
JOINT_ORDER.push(JOINT_L_FOREARM);
var FOOT = 4;
JOINT_ORDER.push(JOINT_R_FOOT);
JOINT_ORDER.push(JOINT_L_FOOT);
var TOE = 5;
JOINT_ORDER.push(JOINT_R_TOE);
JOINT_ORDER.push(JOINT_L_TOE);
//*** middle joints ***
var SPINE = 0;
JOINT_ORDER.push(JOINT_SPINE);

//We have to store the angles so we can invert yaw and roll when making the animation
//symmetrical

//Front refers to leg, not arm.
//Legs Extending
rightAngles[0][HIP] = [30.0, 0.0, 8.0];
rightAngles[0][KNEE] = [-15.0, 0.0, 0.0];
rightAngles[0][ARM] = [85.0, -25.0, 0.0];
rightAngles[0][FOREARM] = [0.0, 0.0, -15.0];
rightAngles[0][FOOT] = [0.0, 0.0, 0.0];
rightAngles[0][TOE] = [0.0, 0.0, 0.0];

leftAngles[0][HIP] = [-15, 0.0, 8.0];
leftAngles[0][KNEE] = [-26, 0.0, 0.0];
leftAngles[0][ARM] = [85.0, 20.0, 0.0];
leftAngles[0][FOREARM] = [10.0, 0.0, -25.0];
leftAngles[0][FOOT] = [-13.0, 0.0, 0.0];
leftAngles[0][TOE] = [34.0, 0.0, 0.0];

middleAngles[0][SPINE] = [0.0, -15.0, 5.0];

//Legs Passing
rightAngles[1][HIP] = [6.0, 0.0, 8.0];
rightAngles[1][KNEE] = [-12.0, 0.0, 0.0];
rightAngles[1][ARM] = [85.0, 0.0, 0.0];
rightAngles[1][FOREARM] = [0.0, 0.0, -15.0];
rightAngles[1][FOOT] = [6.0, -8.0, 0.0];
rightAngles[1][TOE] = [0.0, 0.0, 0.0];

leftAngles[1][HIP] = [10.0, 0.0, 8.0];
leftAngles[1][KNEE] = [-60.0, 0.0, 0.0];
leftAngles[1][ARM] = [85.0, 0.0, 0.0];
leftAngles[1][FOREARM] = [0.0, 0.0, -15.0];
leftAngles[1][FOOT] = [0.0, 0.0, 0.0];
leftAngles[1][TOE] = [0.0, 0.0, 0.0];

middleAngles[1][SPINE] = [0.0, 0.0, 0.0];

//Actual keyframes for the animation
var walkKeyFrames = procAnimAPI.generateKeyframes(rightAngles, leftAngles, middleAngles, NUM_FRAMES);

// ******************************* Animation Is Defined Above *************************************

// ********************************** Standing Key Frame ******************************************
//We don't have to do any mirroring or anything, since this is just a single pose.
var rightQuats = [];
var leftQuats = [];
var middleQuats = [];

rightQuats[HIP] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 7.0);
rightQuats[KNEE] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 0.0);
rightQuats[ARM] = Quat.fromPitchYawRollDegrees(85.0, 0.0, 0.0);
rightQuats[FOREARM] = Quat.fromPitchYawRollDegrees(0.0, 0.0, -10.0);
rightQuats[FOOT] = Quat.fromPitchYawRollDegrees(0.0, -8.0, 0.0);
rightQuats[TOE] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 0.0);

leftQuats[HIP] = Quat.fromPitchYawRollDegrees(0, 0.0, -7.0);
leftQuats[KNEE] = Quat.fromPitchYawRollDegrees(0, 0.0, 0.0);
leftQuats[ARM] = Quat.fromPitchYawRollDegrees(85.0, 0.0, 0.0);
leftQuats[FOREARM] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 10.0);
leftQuats[FOOT] = Quat.fromPitchYawRollDegrees(0.0, 8.0, 0.0);
leftQuats[TOE] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 0.0);

middleQuats[SPINE] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 0.0);

var standingKeyFrame = new procAnimAPI.KeyFrame(rightQuats, leftQuats, middleQuats);

// ************************************************************************************************


var currentFrame = 0;

var walkTime = 0.0;

var walkWheelRadius = 0.5;
var walkWheelRate = 2.0 * 3.141592 * walkWheelRadius / 8.0;

var avatarAcceleration = 0.75;
var avatarVelocity = 0.0;
var avatarMaxVelocity = 1.4;

function handleAnimation(deltaTime) {

    updateBlinking(deltaTime);
    updateBlendShapes(deltaTime);

    if (Math.random() < 0.01) {
        setRandomExpression();
    }

    if (avatarVelocity == 0.0) {
        walkTime = 0.0;
        currentFrame = 0;
    } else {
        walkTime += avatarVelocity * deltaTime;
        if (walkTime > walkWheelRate) {
            walkTime = 0.0;
            currentFrame++;
            if (currentFrame % 2 == 1) {
                playRandomFootstepSound();
            }
            if (currentFrame > 3) {
                currentFrame = 0;
            }
        }
    }

    var frame = walkKeyFrames[currentFrame];

    var walkInterp = walkTime / walkWheelRate;
    var animInterp = avatarVelocity / (avatarMaxVelocity / 1.3);
    if (animInterp > 1.0) animInterp = 1.0;

    for (var i = 0; i < JOINT_ORDER.length; i++) {
        var walkJoint = procAnimAPI.deCasteljau(frame.rotations[i], frame.nextFrame.rotations[i], frame.controlPoints[i][0], frame.controlPoints[i][1], walkInterp);
        var standJoint = standingKeyFrame.rotations[i];
        var finalJoint = Quat.mix(standJoint, walkJoint, animInterp);
        Avatar.setJointData(JOINT_ORDER[i], finalJoint);
    }
}

function jumpWithLoudness(deltaTime) {
    // potentially change pelvis height depending on trailing average loudness

    pelvisOscillatorVelocity += deltaTime * Agent.lastReceivedAudioLoudness * 700.0 ;

    pelvisOscillatorVelocity -= pelvisOscillatorPosition * 0.75;
    pelvisOscillatorVelocity *= 0.97;
    pelvisOscillatorPosition += deltaTime * pelvisOscillatorVelocity;
    Avatar.headPitch = pelvisOscillatorPosition * 60.0;

    var pelvisPosition = Avatar.position;
    pelvisPosition.y = (Y_PELVIS - 0.35) + pelvisOscillatorPosition;

    if (pelvisPosition.y < Y_PELVIS) {
        pelvisPosition.y = Y_PELVIS;
    } else if (pelvisPosition.y > Y_PELVIS + 1.0) {
        pelvisPosition.y = Y_PELVIS + 1.0;
    }

    Avatar.position = pelvisPosition;
}

var forcedMove = false;

var wasMovingLastFrame = false;

function handleHeadTurn() {
    if (!isTurningHead && (Math.random() < CHANCE_OF_HEAD_TURNING)) {
        targetHeadPitch = getRandomFloat(-PITCH_RANGE, PITCH_RANGE);
        targetHeadYaw = getRandomFloat(-YAW_RANGE, YAW_RANGE);
        isTurningHead = true;
    } else {
        Avatar.headPitch = Avatar.headPitch + (targetHeadPitch - Avatar.headPitch) * HEAD_TURN_RATE;
        Avatar.headYaw = Avatar.headYaw + (targetHeadYaw - Avatar.headYaw) * HEAD_TURN_RATE;
        if (Math.abs(Avatar.headPitch - targetHeadPitch) < STOP_TOLERANCE &&
            Math.abs(Avatar.headYaw - targetHeadYaw) < STOP_TOLERANCE) {
            isTurningHead = false;
        }
    }
}

function stopWalking() {
    avatarVelocity = 0.0;
    isMoving = false;
}

var MAX_ATTEMPTS = 40;
function handleWalking(deltaTime) {

    if (forcedMove || (!isMoving && Math.random() < CHANCE_OF_MOVING)) {
        // Set new target location

        var moveRange;
        if (Math.random() < CHANCE_OF_BIG_MOVE) {
            moveRange = MOVE_RANGE_BIG;
        } else {
            moveRange = MOVE_RANGE_SMALL;
        }

        //Keep trying new orientations if the desired target location is out of bounds
        var attempts = 0;
        do {
            targetOrientation = Quat.multiply(Avatar.orientation, Quat.angleAxis(getRandomFloat(-TURN_RANGE, TURN_RANGE), { x:0, y:1, z:0 }));
            var front = Quat.getFront(targetOrientation);

            targetPosition = Vec3.sum(Avatar.position, Vec3.multiply(front, getRandomFloat(0.0, moveRange)));
        }
        while ((targetPosition.x < X_MIN || targetPosition.x > X_MAX || targetPosition.z < Z_MIN || targetPosition.z > Z_MAX)
               && attempts < MAX_ATTEMPTS);

        targetPosition.x = clamp(targetPosition.x, X_MIN, X_MAX);
        targetPosition.z = clamp(targetPosition.z, Z_MIN, Z_MAX);
        targetPosition.y = Y_PELVIS;

        wasMovingLastFrame = true;
        isMoving = true;
        forcedMove = false;
    } else if (isMoving) {

        var targetVector = Vec3.subtract(targetPosition, Avatar.position);
        var distance = Vec3.length(targetVector);
        if (distance <= avatarVelocity * deltaTime) {
            Avatar.position = targetPosition;
            stopWalking();
        } else {
            var direction = Vec3.normalize(targetVector);
            //Figure out if we should be slowing down
            var t = avatarVelocity / avatarAcceleration;
            var d = (avatarVelocity / 2.0) * t;
            if (distance < d) {
                avatarVelocity -= avatarAcceleration * deltaTime;
                if (avatarVelocity <= 0) {
                    stopWalking();
                }
            } else {
                avatarVelocity += avatarAcceleration * deltaTime;
                if (avatarVelocity > avatarMaxVelocity) avatarVelocity = avatarMaxVelocity;
            }
            Avatar.position = Vec3.sum(Avatar.position, Vec3.multiply(direction, avatarVelocity * deltaTime));
            Avatar.orientation = Quat.mix(Avatar.orientation, targetOrientation, TURN_RATE);

            wasMovingLastFrame = true;

        }
    }
}

function handleTalking() {
    if (Math.random() < CHANCE_OF_SOUND) {
        playRandomSound();
    }
}

function changePelvisHeight(newHeight) {
    var newPosition = Avatar.position;
    newPosition.y = newHeight;
    Avatar.position = newPosition;
}

function updateBehavior(deltaTime) {

    if (AvatarList.containsAvatarWithDisplayName("mrdj")) {
        if (wasMovingLastFrame) {
            isMoving = false;
        }

        // we have a DJ, shouldn't we be dancing?
        jumpWithLoudness(deltaTime);
    } else {

        // no DJ, let's just chill on the dancefloor - randomly walking and talking
        handleHeadTurn();
        handleAnimation(deltaTime);
        handleWalking(deltaTime);
        handleTalking();
    }
}

Script.update.connect(updateBehavior);
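An aside on the script above: handleAnimation() blends walk keyframes with procAnimAPI.deCasteljau(), which by its signature looks like a cubic Bezier evaluated on rotations by repeated slerp. A sketch of that evaluation in C++/glm, my own illustration of the idea rather than the API's actual implementation:

    #include <glm/gtc/quaternion.hpp>

    // Cubic de Casteljau on rotations: k1/k2 are keyframe rotations, c1/c2 the
    // control points, t in [0,1] the interpolation parameter.
    glm::quat deCasteljau(const glm::quat& k1, const glm::quat& k2,
                          const glm::quat& c1, const glm::quat& c2, float t) {
        glm::quat a  = glm::slerp(k1, c1, t); // collapse the control polygon,
        glm::quat b  = glm::slerp(c1, c2, t); // one level per pass...
        glm::quat c  = glm::slerp(c2, k2, t);
        glm::quat ab = glm::slerp(a, b, t);
        glm::quat bc = glm::slerp(b, c, t);
        return glm::slerp(ab, bc, t);         // ...until one rotation remains
    }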
@@ -64,7 +64,7 @@ Audio::Audio(QObject* parent) :
     _audioOutput(NULL),
     _desiredOutputFormat(),
     _outputFormat(),
-    _outputDevice(NULL),
+    //_outputDevice(NULL),
     _numOutputCallbackBytes(0),
     _loopbackAudioOutput(NULL),
     _loopbackOutputDevice(NULL),
@@ -76,7 +76,7 @@ Audio::Audio(QObject* parent) :
     // slower than real time (or at least the desired sample rate). If you increase the size of the ring buffer, then it
     // this delay will slowly add up and the longer someone runs, they more delayed their audio will be.
     _inputRingBuffer(0),
-    _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 100, true, 0, 0, true),
+    _receivedAudioStream(0, 100, true, 0, 0, true),
     _isStereoInput(false),
    _averagedLatency(0.0),
    _lastInputLoudness(0),
@@ -115,8 +115,8 @@ Audio::Audio(QObject* parent) :
     _inputRingBufferMsecsAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
     _audioOutputMsecsUnplayedStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
     _lastSentAudioPacket(0),
-    _packetSentTimeGaps(1, APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS)
+    _packetSentTimeGaps(1, APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS),
+    _audioOutputIODevice(*this)
 {
     // clear the array of locally injected samples
     memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
@@ -128,6 +128,8 @@ void Audio::init(QGLWidget *parent) {
     _micTextureId = parent->bindTexture(QImage(Application::resourcesPath() + "images/mic.svg"));
     _muteTextureId = parent->bindTexture(QImage(Application::resourcesPath() + "images/mic-mute.svg"));
     _boxTextureId = parent->bindTexture(QImage(Application::resourcesPath() + "images/audio-box.svg"));
+
+    connect(&_receivedAudioStream, &RawMixedAudioStream::processSamples, this, &Audio::receivedAudioStreamProcessSamples, Qt::DirectConnection);
 }
 
 void Audio::reset() {
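The Qt::DirectConnection above is load-bearing: processSamples passes outputBuffer by non-const reference, and the emitter needs the slot to have filled it before continuing, which only holds if the slot runs synchronously inside the emit on the same thread. A stripped-down illustration with hypothetical names:

    #include <QByteArray>
    #include <QObject>

    class StreamEmitter : public QObject {
        Q_OBJECT
    signals:
        void processSamples(const QByteArray& input, QByteArray& output);
    public:
        QByteArray demo(const QByteArray& input) {
            QByteArray output;
            emit processSamples(input, output); // DirectConnection: the slot fills
            return output;                      // 'output' before emit returns
        }
    };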
@@ -724,12 +726,90 @@ void Audio::handleAudioInput() {
         delete[] inputAudioSamples;
     }
 
-    if (_receivedAudioStream.getPacketsReceived() > 0) {
+    /*if (_receivedAudioStream.getPacketsReceived() > 0) {
         pushAudioToOutput();
-    }
+    }*/
 }
 
+void Audio::receivedAudioStreamProcessSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) {
+
+    printf("receivedAudioStreamProcessSamples()\n");
+
+    static const int numNetworkOutputSamples = NETWORK_BUFFER_LENGTH_SAMPLES_STEREO;
+    static const int numDeviceOutputSamples = numNetworkOutputSamples * (_outputFormat.sampleRate() * _outputFormat.channelCount())
+        / (_desiredOutputFormat.sampleRate() * _desiredOutputFormat.channelCount());
+
+    outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t));
+
+    int16_t* receivedSamples = new int16_t[numNetworkOutputSamples];
+    if (_processSpatialAudio) {
+        unsigned int sampleTime = _spatialAudioStart;
+        QByteArray buffer = inputBuffer.left(numNetworkOutputSamples * sizeof(int16_t));
+
+        // Accumulate direct transmission of audio from sender to receiver
+        if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
+            emit preProcessOriginalInboundAudio(sampleTime, buffer, _desiredOutputFormat);
+            addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
+        }
+
+        // Send audio off for spatial processing
+        emit processInboundAudio(sampleTime, buffer, _desiredOutputFormat);
+
+        // copy the samples we'll resample from the spatial audio ring buffer - this also
+        // pushes the read pointer of the spatial audio ring buffer forwards
+        _spatialAudioRingBuffer.readSamples(receivedSamples, numNetworkOutputSamples);
+
+        // Advance the start point for the next packet of audio to arrive
+        _spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount();
+    } else {
+        // copy the samples we'll resample from the ring buffer - this also
+        // pushes the read pointer of the ring buffer forwards
+        //receivedAudioStreamPopOutput.readSamples(receivedSamples, numNetworkOutputSamples);
+
+        memcpy(receivedSamples, inputBuffer.data(), numNetworkOutputSamples * sizeof(int16_t));
+    }
+
+    // copy the packet from the RB to the output
+    linearResampling(receivedSamples,
+        (int16_t*)outputBuffer.data(),
+        numNetworkOutputSamples,
+        numDeviceOutputSamples,
+        _desiredOutputFormat, _outputFormat);
+
+    /*if (_outputDevice) {
+        _outputDevice->write(outputBuffer);
+    }*/
+    printf("\t outputBuffer now size %d\n", outputBuffer.size());
+
+    if (_scopeEnabled && !_scopeEnabledPause) {
+        unsigned int numAudioChannels = _desiredOutputFormat.channelCount();
+        int16_t* samples = receivedSamples;
+        for (int numSamples = numNetworkOutputSamples / numAudioChannels; numSamples > 0; numSamples -= NETWORK_SAMPLES_PER_FRAME) {
+
+            unsigned int audioChannel = 0;
+            addBufferToScope(
+                _scopeOutputLeft,
+                _scopeOutputOffset,
+                samples, audioChannel, numAudioChannels);
+
+            audioChannel = 1;
+            addBufferToScope(
+                _scopeOutputRight,
+                _scopeOutputOffset,
+                samples, audioChannel, numAudioChannels);
+
+            _scopeOutputOffset += NETWORK_SAMPLES_PER_FRAME;
+            _scopeOutputOffset %= _samplesPerScope;
+            samples += NETWORK_SAMPLES_PER_FRAME * numAudioChannels;
+        }
+    }
+
+    delete[] receivedSamples;
+}
 
 void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) {
     if (_audioOutput) {
         // Audio output must exist and be correctly set up if we're going to process received audio
         processReceivedAudio(audioByteArray);
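For intuition, the buffer-size arithmetic in receivedAudioStreamProcessSamples() scales the network sample count by the ratio of the device format to the desired (network) format. With illustrative numbers only (say, a 24 kHz stereo network stream played on a 48 kHz stereo device, and a 512-sample network buffer):

    const int numNetworkOutputSamples = 512;            // illustrative value
    const int numDeviceOutputSamples = numNetworkOutputSamples
        * (48000 * 2)   // _outputFormat:        sampleRate * channelCount
        / (24000 * 2);  // _desiredOutputFormat: sampleRate * channelCount
    // = 1024 samples, so outputBuffer is resized to 1024 * sizeof(int16_t) bytes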
@@ -738,6 +818,9 @@ void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) {
     Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AUDIO).updateValue(audioByteArray.size());
 }
 
+
+
+
 void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
 
     int numBytesPacketHeader = numBytesForPacketHeader(packet);
@@ -917,7 +1000,7 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
 
 void Audio::pushAudioToOutput() {
 
-    if (_audioOutput->bytesFree() == _audioOutput->bufferSize()) {
+    /*if (_audioOutput->bytesFree() == _audioOutput->bufferSize()) {
         // the audio output has no samples to play. set the downstream audio to starved so that it
         // refills to its desired size before pushing frames
         _receivedAudioStream.setToStarved();
@@ -1011,7 +1094,7 @@ void Audio::pushAudioToOutput() {
     }
 
     delete[] receivedSamples;
-}
+}*/
 }
 
 void Audio::processProceduralAudio(int16_t* monoInput, int numSamples) {
@@ -1631,6 +1714,11 @@ void Audio::renderLineStrip(const float* color, int x, int y, int n, int offset,
 }
 
+
+void Audio::outputFormatChanged() {
+    int deviceOutputFrameSize = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * _outputFormat.channelCount() * _outputFormat.sampleRate() / _desiredOutputFormat.sampleRate();
+    _receivedAudioStream.resizeFrame(deviceOutputFrameSize);
+}
+
 bool Audio::switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo) {
     bool supportedFormat = false;
@@ -1681,7 +1769,7 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo)
     // cleanup any previously initialized device
     if (_audioOutput) {
         _audioOutput->stop();
-        _outputDevice = NULL;
+        //_outputDevice = NULL;
 
         delete _audioOutput;
         _audioOutput = NULL;
@@ -1703,13 +1791,21 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo)
     if (adjustedFormatForAudioDevice(outputDeviceInfo, _desiredOutputFormat, _outputFormat)) {
         qDebug() << "The format to be used for audio output is" << _outputFormat;
 
-        const int AUDIO_OUTPUT_BUFFER_SIZE_FRAMES = 10;
+        outputFormatChanged();
+
+        const int AUDIO_OUTPUT_BUFFER_SIZE_FRAMES = 3;
+
         // setup our general output device for audio-mixer audio
         _audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);
         _audioOutput->setBufferSize(AUDIO_OUTPUT_BUFFER_SIZE_FRAMES * _outputFormat.bytesForDuration(BUFFER_SEND_INTERVAL_USECS));
-        qDebug() << "Ring Buffer capacity in frames: " << AUDIO_OUTPUT_BUFFER_SIZE_FRAMES;
-        _outputDevice = _audioOutput->start();
+        printf("\n\n");
+        qDebug() << "Ring Buffer capacity in frames: " << (float)_outputFormat.durationForBytes(_audioOutput->bufferSize()) / (float)BUFFER_SEND_INTERVAL_USECS;
+        printf("\n\n");
+        //_outputDevice = _audioOutput->start();
+
+        _audioOutputIODevice.start();
+        _audioOutput->start(&_audioOutputIODevice);
 
         // setup a loopback audio output device
         _loopbackAudioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);
@@ -1779,3 +1875,55 @@ float Audio::getInputRingBufferMsecsAvailable() const {
     float msecsInInputRingBuffer = bytesInInputRingBuffer / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
     return msecsInInputRingBuffer;
 }
+
+qint64 Audio::AudioOutputIODevice::readData(char * data, qint64 maxSize) {
+    printf("readData() request %d bytes\n", maxSize);
+    /*
+    float framesRequested = (float)_parent._outputFormat.durationForBytes(maxSize) / (float)BUFFER_SEND_INTERVAL_USECS;
+
+    if (framesRequested > 67.0f) {
+        maxSize /= 2;
+    }
+
+    quint64 now = usecTimestampNow();
+    printf("%llu\n", now - _lastReadTime);
+    _lastReadTime = now;
+
+    int16_t* buffer;
+
+    buffer = (int16_t*)data;
+
+    for (int i = 0; i < maxSize / 2; i++) {
+        *(buffer++) = (int16_t)randIntInRange(0, 10000);
+    }
+
+    return 2 * (maxSize / 2);
+    */
+
+    int samplesRequested = maxSize / sizeof(int16_t);
+
+    printf("requesting %d samples\n", samplesRequested);
+
+    int samplesPopped;
+    int bytesWritten;
+    if ((samplesPopped = _parent._receivedAudioStream.popSamples(samplesRequested, false, false)) > 0) {
+        printf("\t pop succeeded: %d samples\n", samplesPopped);
+
+        AudioRingBuffer::ConstIterator lastPopOutput = _parent._receivedAudioStream.getLastPopOutput();
+        lastPopOutput.readSamples((int16_t*)data, samplesPopped);
+
+        bytesWritten = samplesPopped * sizeof(int16_t);
+    } else {
+        printf("\t pop failed\n");
+        memset(data, 0, maxSize);
+        bytesWritten = maxSize;
+    }
+    printf("\t wrote %d bytes\n", bytesWritten);
+
+    return bytesWritten;
+}
@@ -33,7 +33,7 @@
 #include <AbstractAudioInterface.h>
 #include <StdDev.h>
 
-#include "MixedAudioStream.h"
+#include "RawMixedAudioStream.h"
 
 static const int NUM_AUDIO_CHANNELS = 2;
 
@@ -45,6 +45,23 @@ class QIODevice;
 class Audio : public AbstractAudioInterface {
     Q_OBJECT
 public:
+
+    class AudioOutputIODevice : public QIODevice {
+    public:
+        AudioOutputIODevice(Audio& parent) : _parent(parent) {};
+
+        void start() { open(QIODevice::ReadOnly); }
+        void stop() { close(); }
+
+        qint64 readData(char * data, qint64 maxSize);
+        qint64 writeData(const char * data, qint64 maxSize) { return 0; }
+
+    private:
+        Audio& _parent;
+        quint64 _lastReadTime;
+    };
+
+
     // setup for audio I/O
     Audio(QObject* parent = 0);
@@ -94,6 +111,7 @@ public slots:
     void addReceivedAudioToStream(const QByteArray& audioByteArray);
     void parseAudioStreamStatsPacket(const QByteArray& packet);
     void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples);
+    void receivedAudioStreamProcessSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer);
     void handleAudioInput();
     void reset();
     void resetStats();
@@ -133,7 +151,10 @@ signals:
     void preProcessOriginalInboundAudio(unsigned int sampleTime, QByteArray& samples, const QAudioFormat& format);
     void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
     void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
+
+private:
+    void outputFormatChanged();
+
 private:
 
     QByteArray firstInputFrame;
@@ -146,14 +167,14 @@ private:
     QAudioOutput* _audioOutput;
     QAudioFormat _desiredOutputFormat;
     QAudioFormat _outputFormat;
-    QIODevice* _outputDevice;
+    //QIODevice* _outputDevice;
     int _numOutputCallbackBytes;
     QAudioOutput* _loopbackAudioOutput;
     QIODevice* _loopbackOutputDevice;
     QAudioOutput* _proceduralAudioOutput;
     QIODevice* _proceduralOutputDevice;
     AudioRingBuffer _inputRingBuffer;
-    MixedAudioStream _receivedAudioStream;
+    RawMixedAudioStream _receivedAudioStream;
     bool _isStereoInput;
 
     QString _inputAudioDeviceName;
@@ -282,6 +303,8 @@ private:
 
     quint64 _lastSentAudioPacket;
     MovingMinMaxAvg<quint64> _packetSentTimeGaps;
+
+    AudioOutputIODevice _audioOutputIODevice;
 };
 
@@ -70,7 +70,12 @@ void InboundAudioStream::clearBuffer() {
     _currentJitterBufferFrames = 0;
 }
 
+int InboundAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
+    return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
+}
+
 int InboundAudioStream::parseData(const QByteArray& packet) {
 
     PacketType packetType = packetTypeForPacket(packet);
     QUuid senderUUID = uuidFromPacketHeader(packet);
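With this default raw-PCM body in the base class, a stream subclass only has to describe its header. A hypothetical subclass to show the shape (not the actual RawMixedAudioStream, whose source is not part of this diff):

    // Stream whose payload is raw PCM immediately after the sequence number.
    class RawPCMStream : public InboundAudioStream {
    public:
        using InboundAudioStream::InboundAudioStream; // inherit constructors
    protected:
        int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum,
                                  int& numAudioSamples) {
            numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
            return 0; // consumed no stream-property bytes; the rest is audio
        }
        // parseAudioData() deliberately not overridden: the default above
        // writes the raw samples straight into _ringBuffer.
    };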
@@ -82,7 +87,10 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
     // parse sequence number and track it
     quint16 sequence = *(reinterpret_cast<const quint16*>(sequenceAt));
     readBytes += sizeof(quint16);
-    SequenceNumberStats::ArrivalInfo arrivalInfo = frameReceivedUpdateNetworkStats(sequence, senderUUID);
+    SequenceNumberStats::ArrivalInfo arrivalInfo = _incomingSequenceNumberStats.sequenceNumberReceived(sequence, senderUUID);
+
+    frameReceivedUpdateTimingStats();
 
     // TODO: handle generalized silent packet here?????
@@ -130,32 +138,78 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
     return readBytes;
 }
 
-bool InboundAudioStream::popFrames(int numFrames, bool starveOnFail) {
-    int numSamplesRequested = numFrames * _ringBuffer.getNumFrameSamples();
+int InboundAudioStream::popSamples(int maxSamples, bool allOrNothing, bool starveOnFail) {
+    int samplesPopped = 0;
+    int samplesAvailable = _ringBuffer.samplesAvailable();
     if (_isStarved) {
         // we're still refilling; don't pop
         _consecutiveNotMixedCount++;
         _lastPopSucceeded = false;
     } else {
-        if (_ringBuffer.samplesAvailable() >= numSamplesRequested) {
+        if (samplesAvailable >= maxSamples) {
             // we have enough samples to pop, so we're good to mix
-            _lastPopOutput = _ringBuffer.nextOutput();
-            _ringBuffer.shiftReadPosition(numSamplesRequested);
-            framesAvailableChanged();
-
-            _hasStarted = true;
-            _lastPopSucceeded = true;
+            popSamplesNoCheck(maxSamples);
+            samplesPopped = maxSamples;
+
         } else {
-            // we don't have enough samples, so set this stream to starve
+            // we don't have enough frames, so set this stream to starve
             // if starveOnFail is true
             if (starveOnFail) {
                 starved();
                 _consecutiveNotMixedCount++;
             }
-            _lastPopSucceeded = false;
+
+            if (!allOrNothing && samplesAvailable > 0) {
+                popSamplesNoCheck(samplesAvailable);
+                samplesPopped = samplesAvailable;
+            } else {
+                _lastPopSucceeded = false;
+            }
         }
     }
-    return _lastPopSucceeded;
+    return samplesPopped;
+}
+
+int InboundAudioStream::popFrames(int maxFrames, bool allOrNothing, bool starveOnFail) {
+    int framesPopped = 0;
+    int framesAvailable = _ringBuffer.framesAvailable();
+    if (_isStarved) {
+        // we're still refilling; don't pop
+        _consecutiveNotMixedCount++;
+        _lastPopSucceeded = false;
+    } else {
+        if (framesAvailable >= maxFrames) {
+            // we have enough samples to pop, so we're good to mix
+            popSamplesNoCheck(maxFrames * _ringBuffer.getNumFrameSamples());
+            framesPopped = maxFrames;
+
+        } else {
+            // we don't have enough frames, so set this stream to starve
+            // if starveOnFail is true
+            if (starveOnFail) {
+                starved();
+                _consecutiveNotMixedCount++;
+            }
+
+            if (!allOrNothing && framesAvailable > 0) {
+                popSamplesNoCheck(framesAvailable * _ringBuffer.getNumFrameSamples());
+                framesPopped = framesAvailable;
+            } else {
+                _lastPopSucceeded = false;
+            }
+        }
+    }
+    return framesPopped;
+}
+
+void InboundAudioStream::popSamplesNoCheck(int samples) {
+    _lastPopOutput = _ringBuffer.nextOutput();
+    _ringBuffer.shiftReadPosition(samples);
+    framesAvailableChanged();
+
+    _hasStarted = true;
+    _lastPopSucceeded = true;
 }
 
 void InboundAudioStream::framesAvailableChanged() {
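Taken together, the two pop paths serve different consumers: the audio-mixer keeps its all-or-nothing frame pops, while the client's pull-mode readData() asks in samples and accepts a partial pop rather than stalling the output device. Call-site sketch, paraphrasing the call sites in this diff:

    // audio-mixer tick: exactly one frame or nothing (starves the stream on failure)
    bool ready = stream->popFrames(1, true) > 0;

    // client output path: take whatever is buffered, up to the device's request
    int samplesPopped = stream->popSamples(samplesRequested, false, false);
    if (samplesPopped > 0) {
        AudioRingBuffer::ConstIterator output = stream->getLastPopOutput();
        output.readSamples(outputSamples, samplesPopped);
    }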
@@ -204,9 +258,7 @@ int InboundAudioStream::clampDesiredJitterBufferFramesValue(int desired) const {
     return glm::clamp(desired, MIN_FRAMES_DESIRED, MAX_FRAMES_DESIRED);
 }
 
-SequenceNumberStats::ArrivalInfo InboundAudioStream::frameReceivedUpdateNetworkStats(quint16 sequenceNumber, const QUuid& senderUUID) {
-    // track the sequence number we received
-    SequenceNumberStats::ArrivalInfo arrivalInfo = _incomingSequenceNumberStats.sequenceNumberReceived(sequenceNumber, senderUUID);
-
+void InboundAudioStream::frameReceivedUpdateTimingStats() {
     // update our timegap stats and desired jitter buffer frames if necessary
     // discard the first few packets we receive since they usually have gaps that aren't represensative of normal jitter
@@ -243,8 +295,6 @@ SequenceNumberStats::ArrivalInfo InboundAudioStream::frameReceivedUpdateNetworkS
         }
     }
     _lastFrameReceivedTime = now;
-
-    return arrivalInfo;
 }
 
 int InboundAudioStream::writeDroppableSilentSamples(int numSilentSamples) {
@ -63,8 +63,8 @@ public:
|
||||||
|
|
||||||
virtual int parseData(const QByteArray& packet);
|
virtual int parseData(const QByteArray& packet);
|
||||||
|
|
||||||
|
int popFrames(int maxFrames, bool allOrNothing, bool starveOnFail = true);
|
||||||
bool popFrames(int numFrames, bool starveOnFail = true);
|
int popSamples(int maxSamples, bool allOrNothing, bool starveOnFail = true);
|
||||||
|
|
||||||
bool lastPopSucceeded() const { return _lastPopSucceeded; };
|
bool lastPopSucceeded() const { return _lastPopSucceeded; };
|
||||||
const AudioRingBuffer::ConstIterator& getLastPopOutput() const { return _lastPopOutput; }
|
const AudioRingBuffer::ConstIterator& getLastPopOutput() const { return _lastPopOutput; }
|
||||||
|
@ -81,6 +81,8 @@ public:
|
||||||
|
|
||||||
void setMaxFramesOverDesired(int maxFramesOverDesired) { _maxFramesOverDesired = maxFramesOverDesired; }
|
void setMaxFramesOverDesired(int maxFramesOverDesired) { _maxFramesOverDesired = maxFramesOverDesired; }
|
||||||
|
|
||||||
|
void resizeFrame(int numFrameSamples) { _ringBuffer.resizeForFrameSize(numFrameSamples); }
|
||||||
|
|
||||||
virtual AudioStreamStats getAudioStreamStats() const;
|
virtual AudioStreamStats getAudioStreamStats() const;
|
||||||
|
|
||||||
/// returns the desired number of jitter buffer frames under the dyanmic jitter buffers scheme
|
/// returns the desired number of jitter buffer frames under the dyanmic jitter buffers scheme
|
||||||
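The new resizeFrame() accessor exists because, once samples can be re-sampled before entering the ring buffer, the stream's frame size may need to follow a change in output format. A toy model of what resizing a frame-based ring buffer entails (FrameRing is a hypothetical stand-in for AudioRingBuffer; that old contents are discarded on resize is an assumption here):

#include <cstdio>
#include <vector>

struct FrameRing {
    int frameSamples = 0;
    std::vector<short> samples;

    // re-shape the buffer for a new frame size; old data is assumed discarded
    void resizeForFrameSize(int numFrameSamples, int framesCapacity = 10) {
        frameSamples = numFrameSamples;
        samples.assign((size_t)numFrameSamples * framesCapacity, 0);
    }
};

int main() {
    FrameRing ring;
    ring.resizeForFrameSize(512);  // e.g. a stereo network frame
    printf("capacity: %zu samples\n", ring.samples.size());
}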
@@ -113,11 +115,12 @@ public:
 
 private:
     void starved();
 
-    SequenceNumberStats::ArrivalInfo frameReceivedUpdateNetworkStats(quint16 sequenceNumber, const QUuid& senderUUID);
+    void frameReceivedUpdateTimingStats();
     int clampDesiredJitterBufferFramesValue(int desired) const;
 
     int writeSamplesForDroppedPackets(int numSamples);
 
+    void popSamplesNoCheck(int samples);
     void framesAvailableChanged();
 
 protected:
@ -129,8 +132,9 @@ protected:
|
||||||
/// how many audio samples this packet contains
|
/// how many audio samples this packet contains
|
||||||
virtual int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) = 0;
|
virtual int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) = 0;
|
||||||
|
|
||||||
/// parses the audio data in the network packet
|
/// parses the audio data in the network packet.
|
||||||
virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) = 0;
|
/// default implementation assumes packet contains raw audio samples after stream properties
|
||||||
|
virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
|
||||||
|
|
||||||
int writeDroppableSilentSamples(int numSilentSamples);
|
int writeDroppableSilentSamples(int numSilentSamples);
|
||||||
|
|
 
libraries/audio/src/InjectedAudioStream.cpp
@@ -58,10 +58,6 @@ int InjectedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
     return packetStream.device()->pos();
 }
 
-int InjectedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
-    return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
-}
-
 AudioStreamStats InjectedAudioStream::getAudioStreamStats() const {
     AudioStreamStats streamStats = PositionalAudioStream::getAudioStreamStats();
     streamStats._streamIdentifier = _streamIdentifier;
 
libraries/audio/src/InjectedAudioStream.h
@@ -32,7 +32,6 @@ private:
 
     AudioStreamStats getAudioStreamStats() const;
    int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
-    int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
 
     const QUuid _streamIdentifier;
     float _radius;
 
libraries/audio/src/MixedAudioStream.cpp
@@ -11,7 +11,3 @@ int MixedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
     numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
     return 0;
 }
-
-int MixedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
-    return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
-}
 
libraries/audio/src/MixedAudioStream.h
@@ -23,7 +23,6 @@ public:
 
 protected:
     int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
-    int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
 };
 
 #endif // hifi_MixedAudioStream_h
 
24
libraries/audio/src/RawMixedAudioStream.cpp
Normal file
@@ -0,0 +1,24 @@
+#include "RawMixedAudioStream.h"
+
+RawMixedAudioStream::RawMixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc)
+    : InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, staticDesiredJitterBufferFrames, maxFramesOverDesired, useStDevForJitterCalc)
+{
+}
+
+int RawMixedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
+    // mixed audio packets do not have any info between the seq num and the audio data.
+    numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
+    return 0;
+}
+
+int RawMixedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
+    QByteArray outputBuffer;
+    emit processSamples(packetAfterStreamProperties, outputBuffer);
+
+    int bytesWritten = _ringBuffer.writeData(outputBuffer.data(), outputBuffer.size());
+    printf("wrote %d samples to ringbuffer\n", bytesWritten / 2);
+
+    return packetAfterStreamProperties.size();
+}
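parseAudioData() above is where the downstream re-sampling hook lands: the raw payload is handed out through the processSamples() signal, and whatever the receiver puts in outputBuffer is what reaches the ring buffer. This only works if the emit runs synchronously, since outputBuffer is read immediately after the call; in Qt terms the receiver must be connected with a direct (same-thread) connection. A minimal stand-in showing the shape of the hook, with std::function playing the role of the Qt signal (names hypothetical):

#include <cstdio>
#include <functional>
#include <string>

using Buffer = std::string;

struct RawStreamModel {
    // the connected handler fills outputBuffer in place, like Qt::DirectConnection
    std::function<void(const Buffer&, Buffer&)> processSamples;

    int parseAudioData(const Buffer& packetPayload) {
        Buffer outputBuffer;
        if (processSamples) {
            processSamples(packetPayload, outputBuffer);  // synchronous "emit"
        }
        printf("buffering %zu bytes\n", outputBuffer.size());
        return (int)packetPayload.size();
    }
};

int main() {
    RawStreamModel stream;
    stream.processSamples = [](const Buffer& in, Buffer& out) {
        out = in + in;  // naive 2x stand-in for real re-sampling
    };
    stream.parseAudioData("abcd");  // prints "buffering 8 bytes"
}

Note that the real method returns the input size rather than bytesWritten, so the caller's packet-position arithmetic stays based on bytes consumed from the packet, not on how much (possibly re-sampled) data was buffered.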
 
32
libraries/audio/src/RawMixedAudioStream.h
Normal file
@@ -0,0 +1,32 @@
+//
+//  RawMixedAudioStream.h
+//  libraries/audio/src
+//
+//  Created by Stephen Birarda on 6/5/13.
+//  Copyright 2013 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#ifndef hifi_RawMixedAudioStream_h
+#define hifi_RawMixedAudioStream_h
+
+#include "InboundAudioStream.h"
+#include "PacketHeaders.h"
+
+class RawMixedAudioStream : public InboundAudioStream {
+    Q_OBJECT
+public:
+    RawMixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc);
+
+signals:
+
+    void processSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer);
+
+protected:
+    int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
+    int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
+};
+
+#endif // hifi_RawMixedAudioStream_h