clean up AudioRingBuffer by subclassing

This commit is contained in:
Stephen Birarda 2013-06-05 11:51:21 -07:00
parent 6353940bf7
commit 4cb00ad54b
12 changed files with 429 additions and 345 deletions

View file

@ -0,0 +1,20 @@
//
// AvatarAudioRingBuffer.cpp
// hifi
//
// Created by Stephen Birarda on 6/5/13.
// Copyright (c) 2013 HighFidelity, Inc. All rights reserved.
//
#include "AvatarAudioRingBuffer.h"
// Construct an avatar audio ring buffer with an initially empty map of
// per-agent FreeVerb effects; entries are created on demand elsewhere.
AvatarAudioRingBuffer::AvatarAudioRingBuffer() :
    _freeVerbs()
{
    
}
// Destroy the buffer, releasing every heap-allocated stk::FreeVerb object
// held in the per-agent map. The map itself cleans up its nodes normally.
AvatarAudioRingBuffer::~AvatarAudioRingBuffer() {
    FreeVerbAgentMap::iterator effectEntry = _freeVerbs.begin();
    while (effectEntry != _freeVerbs.end()) {
        delete effectEntry->second;
        ++effectEntry;
    }
}

View file

@ -0,0 +1,33 @@
//
// AvatarAudioRingBuffer.h
// hifi
//
// Created by Stephen Birarda on 6/5/13.
// Copyright (c) 2013 HighFidelity, Inc. All rights reserved.
//
#ifndef __hifi__AvatarAudioRingBuffer__
#define __hifi__AvatarAudioRingBuffer__
#include <Stk.h>
#include <FreeVerb.h>
#include "PositionalAudioRingBuffer.h"
// Maps an agent ID to the heap-allocated stk::FreeVerb reverb instance
// used when mixing that agent's audio for this avatar.
typedef std::map<uint16_t, stk::FreeVerb*> FreeVerbAgentMap;

// Ring buffer for audio received from an avatar agent. On top of the
// positional data inherited from PositionalAudioRingBuffer it owns one
// FreeVerb effect per source agent; the destructor deletes every map value.
class AvatarAudioRingBuffer : public PositionalAudioRingBuffer {
public:
    AvatarAudioRingBuffer();
    ~AvatarAudioRingBuffer();
    
    // mutable accessor - callers may insert new FreeVerb entries lazily
    FreeVerbAgentMap& getFreeVerbs() { return _freeVerbs; }
private:
    // disallow copying of AvatarAudioRingBuffer objects
    // (the map owns raw pointers, so a shallow copy would double-delete)
    AvatarAudioRingBuffer(const AvatarAudioRingBuffer&);
    AvatarAudioRingBuffer& operator= (const AvatarAudioRingBuffer&);
    
    FreeVerbAgentMap _freeVerbs;
};
#endif /* defined(__hifi__AvatarAudioRingBuffer__) */

View file

@ -0,0 +1,17 @@
//
// InjectedAudioRingBuffer.cpp
// hifi
//
// Created by Stephen Birarda on 6/5/13.
// Copyright (c) 2013 HighFidelity, Inc. All rights reserved.
//
#include "InjectedAudioRingBuffer.h"
// Construct an injected-audio ring buffer with a zero radius, a zero
// attenuation ratio, and a value-initialized (all-zero) stream identifier.
InjectedAudioRingBuffer::InjectedAudioRingBuffer() :
    _radius(0.0f),
    _attenuationRatio(0.0f),
    _streamIdentifier()
{
    
}

View file

@ -0,0 +1,33 @@
//
// InjectedAudioRingBuffer.h
// hifi
//
// Created by Stephen Birarda on 6/5/13.
// Copyright (c) 2013 HighFidelity, Inc. All rights reserved.
//
#ifndef __hifi__InjectedAudioRingBuffer__
#define __hifi__InjectedAudioRingBuffer__
#include <AudioInjector.h>
#include "PositionalAudioRingBuffer.h"
// Ring buffer for audio received from an audio-injector agent. Carries the
// injector's source radius, its attenuation ratio, and the raw identifier
// of the stream the audio belongs to.
class InjectedAudioRingBuffer : public PositionalAudioRingBuffer {
public:
    InjectedAudioRingBuffer();
    
    // radius of the spherical source; 0 indicates a non-spherical (point) source
    float getRadius() const { return _radius; }
    // per-injector attenuation multiplier applied when mixing
    float getAttenuationRatio() const { return _attenuationRatio; }
    // raw bytes identifying the injected stream (STREAM_IDENTIFIER_NUM_BYTES long)
    const unsigned char* getStreamIdentifier() const { return _streamIdentifier; }
private:
    // disallow copying of InjectedAudioRingBuffer objects
    InjectedAudioRingBuffer(const InjectedAudioRingBuffer&);
    InjectedAudioRingBuffer& operator= (const InjectedAudioRingBuffer&);
    
    float _radius;
    float _attenuationRatio;
    unsigned char _streamIdentifier[STREAM_IDENTIFIER_NUM_BYTES];
};
#endif /* defined(__hifi__InjectedAudioRingBuffer__) */

View file

@ -0,0 +1,38 @@
//
// PositionalAudioRingBuffer.cpp
// hifi
//
// Created by Stephen Birarda on 6/5/13.
// Copyright (c) 2013 HighFidelity, Inc. All rights reserved.
//
#include "PositionalAudioRingBuffer.h"
// Construct a positional ring buffer in its pre-start state: position at the
// origin, loopback disabled, and not yet added to any mix.
// NOTE(review): _orientation is initialized to the all-zero quaternion rather
// than the identity rotation - presumably it is overwritten by parsed network
// data before first use; confirm against the data-parsing code.
PositionalAudioRingBuffer::PositionalAudioRingBuffer() :
    AudioRingBuffer(false),
    _position(0.0f, 0.0f, 0.0f),
    _orientation(0.0f, 0.0f, 0.0f, 0.0f),
    _shouldLoopbackForAgent(false),
    _wasAddedToMix(false)
{
    
}
// Decide whether this buffer currently has enough audio queued to contribute
// to the next mix. A stream that has not started yet is held back until it
// accumulates more than one frame plus the jitter buffer; a running stream
// that has drained below one frame is marked stopped again.
bool PositionalAudioRingBuffer::shouldBeAddedToMix(int numJitterBufferSamples) {
    if (!_endOfLastWrite) {
        // nothing has been written into the buffer yet
        return false;
    }
    
    if (!_isStarted && diffLastWriteNextOutput() <= BUFFER_LENGTH_SAMPLES_PER_CHANNEL + numJitterBufferSamples) {
        // not enough samples buffered yet - keep holding this stream back
        printf("Buffer held back\n");
        return false;
    }
    
    if (diffLastWriteNextOutput() < BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
        // the stream ran dry - stop it until it refills past the jitter buffer
        printf("Buffer starved.\n");
        _isStarted = false;
        return false;
    }
    
    // good buffer, add this to the mix
    _isStarted = true;
    return true;
}

View file

@ -0,0 +1,41 @@
//
// PositionalAudioRingBuffer.h
// hifi
//
// Created by Stephen Birarda on 6/5/13.
// Copyright (c) 2013 HighFidelity, Inc. All rights reserved.
//
#ifndef __hifi__PositionalAudioRingBuffer__
#define __hifi__PositionalAudioRingBuffer__
#include <glm/gtx/quaternion.hpp>
#include <AudioRingBuffer.h>
// Base class for ring buffers carrying spatialized (positional) audio: adds a
// world position and orientation plus bookkeeping for whether the stream was
// included in the last mix. Subclassed by AvatarAudioRingBuffer and
// InjectedAudioRingBuffer.
class PositionalAudioRingBuffer : public AudioRingBuffer {
public:
    PositionalAudioRingBuffer();
    
    // Returns true when enough samples are buffered for this stream to be
    // mixed; may also flip the internal started flag (see the .cpp for the
    // jitter-buffer policy).
    bool shouldBeAddedToMix(int numJitterBufferSamples);
    
    // set/cleared by the mixer so that output pointers are only advanced for
    // buffers that actually contributed samples to the last mix
    bool wasAddedToMix() const { return _wasAddedToMix; }
    void setWasAddedToMix(bool wasAddedToMix) { _wasAddedToMix = wasAddedToMix; }
    
    const glm::vec3& getPosition() const { return _position; }
    const glm::quat& getOrientation() const { return _orientation; }
    
    // when true, the agent's own audio is included in the mix sent back to it
    bool shouldLoopbackForAgent() const { return _shouldLoopbackForAgent; }
protected:
    // disallow copying of PositionalAudioRingBuffer objects
    PositionalAudioRingBuffer(const PositionalAudioRingBuffer&);
    PositionalAudioRingBuffer& operator= (const PositionalAudioRingBuffer&);
    
    glm::vec3 _position;
    glm::quat _orientation;
    bool _shouldLoopbackForAgent;
    bool _wasAddedToMix;
};
#endif /* defined(__hifi__PositionalAudioRingBuffer__) */

View file

@ -27,7 +27,9 @@
#include <Stk.h> #include <Stk.h>
#include <FreeVerb.h> #include <FreeVerb.h>
#include "AudioRingBuffer.h" #include "InjectedAudioRingBuffer.h"
#include "AvatarAudioRingBuffer.h"
#include <AudioRingBuffer.h>
#include "PacketHeaders.h" #include "PacketHeaders.h"
#ifdef _WIN32 #ifdef _WIN32
@ -43,17 +45,9 @@
const unsigned short MIXER_LISTEN_PORT = 55443; const unsigned short MIXER_LISTEN_PORT = 55443;
const float SAMPLE_RATE = 22050.0;
const short JITTER_BUFFER_MSECS = 12; const short JITTER_BUFFER_MSECS = 12;
const short JITTER_BUFFER_SAMPLES = JITTER_BUFFER_MSECS * (SAMPLE_RATE / 1000.0); const short JITTER_BUFFER_SAMPLES = JITTER_BUFFER_MSECS * (SAMPLE_RATE / 1000.0);
const int BUFFER_LENGTH_BYTES = 1024;
const int BUFFER_LENGTH_SAMPLES_PER_CHANNEL = (BUFFER_LENGTH_BYTES / 2) / sizeof(int16_t);
const short RING_BUFFER_FRAMES = 10;
const short RING_BUFFER_SAMPLES = RING_BUFFER_FRAMES * BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
const float BUFFER_SEND_INTERVAL_USECS = (BUFFER_LENGTH_SAMPLES_PER_CHANNEL / SAMPLE_RATE) * 1000000; const float BUFFER_SEND_INTERVAL_USECS = (BUFFER_LENGTH_SAMPLES_PER_CHANNEL / SAMPLE_RATE) * 1000000;
const long MAX_SAMPLE_VALUE = std::numeric_limits<int16_t>::max(); const long MAX_SAMPLE_VALUE = std::numeric_limits<int16_t>::max();
@ -70,7 +64,11 @@ void plateauAdditionOfSamples(int16_t &mixSample, int16_t sampleToAdd) {
void attachNewBufferToAgent(Agent *newAgent) { void attachNewBufferToAgent(Agent *newAgent) {
if (!newAgent->getLinkedData()) { if (!newAgent->getLinkedData()) {
newAgent->setLinkedData(new AudioRingBuffer(RING_BUFFER_SAMPLES, BUFFER_LENGTH_SAMPLES_PER_CHANNEL)); if (newAgent->getType() == AGENT_TYPE_AVATAR) {
newAgent->setLinkedData(new AvatarAudioRingBuffer());
} else {
newAgent->setLinkedData(new InjectedAudioRingBuffer());
}
} }
} }
@ -113,203 +111,176 @@ int main(int argc, const char* argv[]) {
gettimeofday(&startTime, NULL); gettimeofday(&startTime, NULL);
while (true) { while (true) {
// enumerate the agents, check if we can add audio from the agent to current mix
for (AgentList::iterator agent = agentList->begin(); agent != agentList->end(); agent++) {
AudioRingBuffer* agentBuffer = (AudioRingBuffer*) agent->getLinkedData();
if (agentBuffer->getEndOfLastWrite()) {
if (!agentBuffer->isStarted()
&& agentBuffer->diffLastWriteNextOutput() <= BUFFER_LENGTH_SAMPLES_PER_CHANNEL + JITTER_BUFFER_SAMPLES) {
printf("Held back buffer for agent with ID %d.\n", agent->getAgentID());
agentBuffer->setShouldBeAddedToMix(false);
} else if (agentBuffer->diffLastWriteNextOutput() < BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
printf("Buffer from agent with ID %d starved.\n", agent->getAgentID());
agentBuffer->setStarted(false);
agentBuffer->setShouldBeAddedToMix(false);
} else {
// good buffer, add this to the mix
agentBuffer->setStarted(true);
agentBuffer->setShouldBeAddedToMix(true);
}
}
}
int numAgents = agentList->size();
SharedAudioFactors audioFactors[numAgents][numAgents];
memset(audioFactors, 0, sizeof(audioFactors));
for (AgentList::iterator agent = agentList->begin(); agent != agentList->end(); agent++) { for (AgentList::iterator agent = agentList->begin(); agent != agentList->end(); agent++) {
if (agent->getType() == AGENT_TYPE_AVATAR) { if (agent->getType() == AGENT_TYPE_AVATAR) {
AudioRingBuffer* agentRingBuffer = (AudioRingBuffer*) agent->getLinkedData(); AvatarAudioRingBuffer* agentRingBuffer = (AvatarAudioRingBuffer*) agent->getLinkedData();
// zero out the client mix for this agent // zero out the client mix for this agent
memset(clientSamples, 0, sizeof(clientSamples)); memset(clientSamples, 0, sizeof(clientSamples));
for (AgentList::iterator otherAgent = agentList->begin(); otherAgent != agentList->end(); otherAgent++) { for (AgentList::iterator otherAgent = agentList->begin(); otherAgent != agentList->end(); otherAgent++) {
if (otherAgent != agent || (otherAgent == agent && agentRingBuffer->shouldLoopbackForAgent())) { if ((otherAgent != agent
AudioRingBuffer* otherAgentBuffer = (AudioRingBuffer*) otherAgent->getLinkedData(); && ((PositionalAudioRingBuffer*)otherAgent->getLinkedData())->shouldBeAddedToMix(JITTER_BUFFER_SAMPLES))
|| (otherAgent == agent && agentRingBuffer->shouldLoopbackForAgent())) {
if (otherAgentBuffer->shouldBeAddedToMix()) { PositionalAudioRingBuffer* otherAgentBuffer = (PositionalAudioRingBuffer*) otherAgent->getLinkedData();
otherAgentBuffer->setWasAddedToMix(true);
float bearingRelativeAngleToSource = 0.0f;
float attenuationCoefficient = 1.0f;
int numSamplesDelay = 0;
float weakChannelAmplitudeRatio = 1.0f;
stk::FreeVerb* otherAgentFreeVerb = NULL;
if (otherAgent != agent) {
glm::vec3 listenerPosition = agentRingBuffer->getPosition();
glm::vec3 relativePosition = otherAgentBuffer->getPosition() - agentRingBuffer->getPosition();
glm::quat inverseOrientation = glm::inverse(agentRingBuffer->getOrientation());
glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;
float bearingRelativeAngleToSource = 0.f; float distanceSquareToSource = glm::dot(relativePosition, relativePosition);
float attenuationCoefficient = 1.0f; float radius = 0.0f;
int numSamplesDelay = 0;
float weakChannelAmplitudeRatio = 1.0f;
stk::FreeVerb* otherAgentFreeVerb = NULL; if (otherAgent->getType() == AGENT_TYPE_AUDIO_INJECTOR) {
radius = ((InjectedAudioRingBuffer*) otherAgentBuffer)->getRadius();
if (otherAgent != agent) {
glm::vec3 listenerPosition = agentRingBuffer->getPosition();
glm::vec3 relativePosition = otherAgentBuffer->getPosition() - agentRingBuffer->getPosition();
glm::quat inverseOrientation = glm::inverse(agentRingBuffer->getOrientation());
glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;
float distanceSquareToSource = glm::dot(relativePosition, relativePosition);
float distanceCoefficient = 1.0f;
float offAxisCoefficient = 1.0f;
if (otherAgentBuffer->getRadius() == 0
|| (distanceSquareToSource > (otherAgentBuffer->getRadius()
* otherAgentBuffer->getRadius()))) {
// this is either not a spherical source, or the listener is outside the sphere
if (otherAgentBuffer->getRadius() > 0) {
// this is a spherical source - the distance used for the coefficient
// needs to be the closest point on the boundary to the source
// multiply the normalized vector between the center of the sphere
// and the position of the source by the radius to get the
// closest point on the boundary of the sphere to the source
glm::vec3 closestPoint = glm::normalize(relativePosition) * otherAgentBuffer->getRadius();
// for the other calculations the agent position is the closest point on the sphere
rotatedSourcePosition = inverseOrientation * closestPoint;
// ovveride the distance to the agent with the distance to the point on the
// boundary of the sphere
distanceSquareToSource = glm::distance2(listenerPosition, -closestPoint);
} else {
// calculate the angle delivery
glm::vec3 rotatedListenerPosition = glm::inverse(otherAgentBuffer->getOrientation())
* relativePosition;
float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
glm::normalize(rotatedListenerPosition));
const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
(OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / 90.0f));
}
const float DISTANCE_SCALE = 2.5f;
const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
const float DISTANCE_LOG_BASE = 2.5f;
const float DISTANCE_SCALE_LOG = logf(DISTANCE_SCALE) / logf(DISTANCE_LOG_BASE);
// calculate the distance coefficient using the distance to this agent
distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR,
DISTANCE_SCALE_LOG +
(logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1);
distanceCoefficient = std::min(1.0f, distanceCoefficient);
// off-axis attenuation and spatialization of audio
// not performed if listener is inside spherical injector
// calculate the angle from the source to the listener
// project the rotated source position vector onto the XZ plane
rotatedSourcePosition.y = 0.0f;
// produce an oriented angle about the y-axis
bearingRelativeAngleToSource = glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f),
glm::normalize(rotatedSourcePosition),
glm::vec3(0.0f, 1.0f, 0.0f));
const float PHASE_AMPLITUDE_RATIO_AT_90 = 0.5;
const int PHASE_DELAY_AT_90 = 20;
float sinRatio = fabsf(sinf(glm::radians(bearingRelativeAngleToSource)));
numSamplesDelay = PHASE_DELAY_AT_90 * sinRatio;
weakChannelAmplitudeRatio = 1 - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);
}
attenuationCoefficient = distanceCoefficient
* otherAgentBuffer->getAttenuationRatio()
* offAxisCoefficient;
bearingRelativeAngleToSource *= (M_PI / 180);
float sinRatio = fabsf(sinf(bearingRelativeAngleToSource));
numSamplesDelay = PHASE_DELAY_AT_90 * sinRatio;
weakChannelAmplitudeRatio = 1 - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);
FreeVerbAgentMap& agentFreeVerbs = agentRingBuffer->getFreeVerbs();
FreeVerbAgentMap::iterator freeVerbIterator = agentFreeVerbs.find(otherAgent->getAgentID());
if (freeVerbIterator == agentFreeVerbs.end()) {
// setup the freeVerb effect for this source for this client
printf("Creating a new FV object\n");
otherAgentFreeVerb = agentFreeVerbs[otherAgent->getAgentID()] = new stk::FreeVerb;
otherAgentFreeVerb->setDamping(DISTANCE_REVERB_DAMPING);
otherAgentFreeVerb->setRoomSize(DISTANCE_REVERB_ROOM_SIZE);
otherAgentFreeVerb->setWidth(DISTANCE_REVERB_WIDTH);
} else {
otherAgentFreeVerb = freeVerbIterator->second;
}
otherAgentFreeVerb->setEffectMix(audioFactors[lowAgentIndex][highAgentIndex].effectMix);
} }
int16_t* goodChannel = bearingRelativeAngleToSource > 0.0f if (radius == 0 || (distanceSquareToSource > radius * radius)) {
? clientSamples // this is either not a spherical source, or the listener is outside the sphere
: clientSamples + BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
int16_t* delayedChannel = bearingRelativeAngleToSource > 0.0f if (radius > 0) {
? clientSamples + BUFFER_LENGTH_SAMPLES_PER_CHANNEL // this is a spherical source - the distance used for the coefficient
: clientSamples; // needs to be the closest point on the boundary to the source
int16_t* delaySamplePointer = otherAgentBuffer->getNextOutput() == otherAgentBuffer->getBuffer()
? otherAgentBuffer->getBuffer() + RING_BUFFER_SAMPLES - numSamplesDelay
: otherAgentBuffer->getNextOutput() - numSamplesDelay;
for (int s = 0; s < BUFFER_LENGTH_SAMPLES_PER_CHANNEL; s++) {
if (s < numSamplesDelay) {
// pull the earlier sample for the delayed channel
int earlierSample = delaySamplePointer[s]
* attenuationCoefficient
* weakChannelAmplitudeRatio;
// apply the STK FreeVerb effect // multiply the normalized vector between the center of the sphere
if (otherAgentFreeVerb) { // and the position of the source by the radius to get the
earlierSample = otherAgentFreeVerb->tick(earlierSample); // closest point on the boundary of the sphere to the source
}
plateauAdditionOfSamples(delayedChannel[s], earlierSample); glm::vec3 closestPoint = glm::normalize(relativePosition) * radius;
// for the other calculations the agent position is the closest point on the sphere
rotatedSourcePosition = inverseOrientation * closestPoint;
// ovveride the distance to the agent with the distance to the point on the
// boundary of the sphere
distanceSquareToSource = glm::distance2(listenerPosition, -closestPoint);
} else {
// calculate the angle delivery for off-axis attenuation
glm::vec3 rotatedListenerPosition = glm::inverse(otherAgentBuffer->getOrientation())
* relativePosition;
float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
glm::normalize(rotatedListenerPosition));
const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
(OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / 90.0f));
// multiply the current attenuation coefficient by the calculated off axis coefficient
attenuationCoefficient *= offAxisCoefficient;
} }
int16_t currentSample = (otherAgentBuffer->getNextOutput()[s] * attenuationCoefficient); const float DISTANCE_SCALE = 2.5f;
const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
const float DISTANCE_LOG_BASE = 2.5f;
const float DISTANCE_SCALE_LOG = logf(DISTANCE_SCALE) / logf(DISTANCE_LOG_BASE);
// calculate the distance coefficient using the distance to this agent
float distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR,
DISTANCE_SCALE_LOG +
(0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1);
distanceCoefficient = std::min(1.0f, distanceCoefficient);
// multiply the current attenuation coefficient by the distance coefficient
attenuationCoefficient *= distanceCoefficient;
// project the rotated source position vector onto the XZ plane
rotatedSourcePosition.y = 0.0f;
// produce an oriented angle about the y-axis
bearingRelativeAngleToSource = glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f),
glm::normalize(rotatedSourcePosition),
glm::vec3(0.0f, 1.0f, 0.0f));
const float PHASE_AMPLITUDE_RATIO_AT_90 = 0.5;
const int PHASE_DELAY_AT_90 = 20;
// figure out the number of samples of delay and the ratio of the amplitude
// in the weak channel for audio spatialization
float sinRatio = fabsf(sinf(glm::radians(bearingRelativeAngleToSource)));
numSamplesDelay = PHASE_DELAY_AT_90 * sinRatio;
weakChannelAmplitudeRatio = 1 - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);
}
FreeVerbAgentMap& agentFreeVerbs = agentRingBuffer->getFreeVerbs();
FreeVerbAgentMap::iterator freeVerbIterator = agentFreeVerbs.find(otherAgent->getAgentID());
if (freeVerbIterator == agentFreeVerbs.end()) {
// setup the freeVerb effect for this source for this client
otherAgentFreeVerb = agentFreeVerbs[otherAgent->getAgentID()] = new stk::FreeVerb;
otherAgentFreeVerb->setDamping(DISTANCE_REVERB_DAMPING);
otherAgentFreeVerb->setRoomSize(DISTANCE_REVERB_ROOM_SIZE);
otherAgentFreeVerb->setWidth(DISTANCE_REVERB_WIDTH);
} else {
otherAgentFreeVerb = freeVerbIterator->second;
}
const float DISTANCE_REVERB_LOG_REMAINDER = 0.32f;
const float DISTANCE_REVERB_MAX_WETNESS = 1.0f;
float effectMix = powf(2.0f, (0.5f * logf(distanceSquareToSource)
/ logf(2.0f)) - DISTANCE_REVERB_LOG_REMAINDER)
* DISTANCE_REVERB_MAX_WETNESS / 64.0f;;
otherAgentFreeVerb->setEffectMix(effectMix);
}
int16_t* goodChannel = (bearingRelativeAngleToSource > 0.0f)
? clientSamples
: clientSamples + BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
int16_t* delayedChannel = (bearingRelativeAngleToSource > 0.0f)
? clientSamples + BUFFER_LENGTH_SAMPLES_PER_CHANNEL
: clientSamples;
int16_t* delaySamplePointer = otherAgentBuffer->getNextOutput() == otherAgentBuffer->getBuffer()
? otherAgentBuffer->getBuffer() + RING_BUFFER_LENGTH_SAMPLES - numSamplesDelay
: otherAgentBuffer->getNextOutput() - numSamplesDelay;
for (int s = 0; s < BUFFER_LENGTH_SAMPLES_PER_CHANNEL; s++) {
if (s < numSamplesDelay) {
// pull the earlier sample for the delayed channel
int earlierSample = delaySamplePointer[s]
* attenuationCoefficient
* weakChannelAmplitudeRatio;
// apply the STK FreeVerb effect // apply the STK FreeVerb effect
if (otherAgentFreeVerb) { if (otherAgentFreeVerb) {
currentSample = otherAgentFreeVerb->tick(currentSample); earlierSample = otherAgentFreeVerb->tick(earlierSample);
} }
plateauAdditionOfSamples(goodChannel[s], currentSample); plateauAdditionOfSamples(delayedChannel[s], earlierSample);
}
if (s + numSamplesDelay < BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
plateauAdditionOfSamples(delayedChannel[s + numSamplesDelay], int16_t currentSample = (otherAgentBuffer->getNextOutput()[s] * attenuationCoefficient);
currentSample * weakChannelAmplitudeRatio);
} // apply the STK FreeVerb effect
if (otherAgentFreeVerb) {
currentSample = otherAgentFreeVerb->tick(currentSample);
}
plateauAdditionOfSamples(goodChannel[s], currentSample);
if (s + numSamplesDelay < BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
plateauAdditionOfSamples(delayedChannel[s + numSamplesDelay],
currentSample * weakChannelAmplitudeRatio);
} }
} }
} }
@ -322,15 +293,15 @@ int main(int argc, const char* argv[]) {
// push forward the next output pointers for any audio buffers we used // push forward the next output pointers for any audio buffers we used
for (AgentList::iterator agent = agentList->begin(); agent != agentList->end(); agent++) { for (AgentList::iterator agent = agentList->begin(); agent != agentList->end(); agent++) {
AudioRingBuffer* agentBuffer = (AudioRingBuffer*) agent->getLinkedData(); PositionalAudioRingBuffer* agentBuffer = (PositionalAudioRingBuffer*) agent->getLinkedData();
if (agentBuffer && agentBuffer->shouldBeAddedToMix()) { if (agentBuffer && agentBuffer->wasAddedToMix()) {
agentBuffer->setNextOutput(agentBuffer->getNextOutput() + BUFFER_LENGTH_SAMPLES_PER_CHANNEL); agentBuffer->setNextOutput(agentBuffer->getNextOutput() + BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
if (agentBuffer->getNextOutput() >= agentBuffer->getBuffer() + RING_BUFFER_SAMPLES) { if (agentBuffer->getNextOutput() >= agentBuffer->getBuffer() + RING_BUFFER_LENGTH_SAMPLES) {
agentBuffer->setNextOutput(agentBuffer->getBuffer()); agentBuffer->setNextOutput(agentBuffer->getBuffer());
} }
agentBuffer->setShouldBeAddedToMix(false); agentBuffer->setWasAddedToMix(false);
} }
} }
@ -348,7 +319,7 @@ int main(int argc, const char* argv[]) {
agentList->updateAgentWithData(agentAddress, packetData, receivedBytes); agentList->updateAgentWithData(agentAddress, packetData, receivedBytes);
if (std::isnan(((AudioRingBuffer *)avatarAgent->getLinkedData())->getOrientation().x)) { if (std::isnan(((PositionalAudioRingBuffer *)avatarAgent->getLinkedData())->getOrientation().x)) {
// kill off this agent - temporary solution to mixer crash on mac sleep // kill off this agent - temporary solution to mixer crash on mac sleep
avatarAgent->setAlive(false); avatarAgent->setAlive(false);
} }
@ -358,7 +329,7 @@ int main(int argc, const char* argv[]) {
for (AgentList::iterator agent = agentList->begin(); agent != agentList->end(); agent++) { for (AgentList::iterator agent = agentList->begin(); agent != agentList->end(); agent++) {
if (agent->getLinkedData()) { if (agent->getLinkedData()) {
AudioRingBuffer* ringBuffer = (AudioRingBuffer*) agent->getLinkedData(); InjectedAudioRingBuffer* ringBuffer = (InjectedAudioRingBuffer*) agent->getLinkedData();
if (memcmp(ringBuffer->getStreamIdentifier(), if (memcmp(ringBuffer->getStreamIdentifier(),
packetData + 1, packetData + 1,
STREAM_IDENTIFIER_NUM_BYTES) == 0) { STREAM_IDENTIFIER_NUM_BYTES) == 0) {

View file

@ -32,12 +32,6 @@ const int PACKET_LENGTH_BYTES_PER_CHANNEL = PACKET_LENGTH_BYTES / 2;
const int PACKET_LENGTH_SAMPLES = PACKET_LENGTH_BYTES / sizeof(int16_t); const int PACKET_LENGTH_SAMPLES = PACKET_LENGTH_BYTES / sizeof(int16_t);
const int PACKET_LENGTH_SAMPLES_PER_CHANNEL = PACKET_LENGTH_SAMPLES / 2; const int PACKET_LENGTH_SAMPLES_PER_CHANNEL = PACKET_LENGTH_SAMPLES / 2;
const int BUFFER_LENGTH_BYTES = 512;
const int BUFFER_LENGTH_SAMPLES = BUFFER_LENGTH_BYTES / sizeof(int16_t);
const int RING_BUFFER_FRAMES = 10;
const int RING_BUFFER_SAMPLES = RING_BUFFER_FRAMES * BUFFER_LENGTH_SAMPLES;
const int PHASE_DELAY_AT_90 = 20; const int PHASE_DELAY_AT_90 = 20;
const float AMPLITUDE_RATIO_AT_90 = 0.5; const float AMPLITUDE_RATIO_AT_90 = 0.5;
@ -47,12 +41,11 @@ const float FLANGE_BASE_RATE = 4;
const float MAX_FLANGE_SAMPLE_WEIGHT = 0.50; const float MAX_FLANGE_SAMPLE_WEIGHT = 0.50;
const float MIN_FLANGE_INTENSITY = 0.25; const float MIN_FLANGE_INTENSITY = 0.25;
const int SAMPLE_RATE = 22050;
const float JITTER_BUFFER_LENGTH_MSECS = 12; const float JITTER_BUFFER_LENGTH_MSECS = 12;
const short JITTER_BUFFER_SAMPLES = JITTER_BUFFER_LENGTH_MSECS * const short JITTER_BUFFER_SAMPLES = JITTER_BUFFER_LENGTH_MSECS *
NUM_AUDIO_CHANNELS * (SAMPLE_RATE / 1000.0); NUM_AUDIO_CHANNELS * (SAMPLE_RATE / 1000.0);
const float AUDIO_CALLBACK_MSECS = (float)BUFFER_LENGTH_SAMPLES / (float)SAMPLE_RATE * 1000.0; const float AUDIO_CALLBACK_MSECS = (float)BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float)SAMPLE_RATE * 1000.0;
const int AGENT_LOOPBACK_MODIFIER = 307; const int AGENT_LOOPBACK_MODIFIER = 307;
@ -92,7 +85,7 @@ int audioCallback (const void* inputBuffer,
int16_t* outputRight = ((int16_t**) outputBuffer)[1]; int16_t* outputRight = ((int16_t**) outputBuffer)[1];
// Add Procedural effects to input samples // Add Procedural effects to input samples
parentAudio->addProceduralSounds(inputLeft, BUFFER_LENGTH_SAMPLES); parentAudio->addProceduralSounds(inputLeft, BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
// add output (@speakers) data to the scope // add output (@speakers) data to the scope
parentAudio->_scope->addSamples(1, outputLeft, PACKET_LENGTH_SAMPLES_PER_CHANNEL); parentAudio->_scope->addSamples(1, outputLeft, PACKET_LENGTH_SAMPLES_PER_CHANNEL);
@ -120,15 +113,15 @@ int audioCallback (const void* inputBuffer,
// Measure the loudness of the signal from the microphone and store in audio object // Measure the loudness of the signal from the microphone and store in audio object
float loudness = 0; float loudness = 0;
for (int i = 0; i < BUFFER_LENGTH_SAMPLES; i++) { for (int i = 0; i < BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
loudness += abs(inputLeft[i]); loudness += abs(inputLeft[i]);
} }
loudness /= BUFFER_LENGTH_SAMPLES; loudness /= BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
parentAudio->_lastInputLoudness = loudness; parentAudio->_lastInputLoudness = loudness;
// add input (@microphone) data to the scope // add input (@microphone) data to the scope
parentAudio->_scope->addSamples(0, inputLeft, BUFFER_LENGTH_SAMPLES); parentAudio->_scope->addSamples(0, inputLeft, BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
Agent* audioMixer = agentList->soloAgentOfType(AGENT_TYPE_AUDIO_MIXER); Agent* audioMixer = agentList->soloAgentOfType(AGENT_TYPE_AUDIO_MIXER);
@ -172,11 +165,11 @@ int audioCallback (const void* inputBuffer,
// if we've been reset, and there isn't any new packets yet // if we've been reset, and there isn't any new packets yet
// just play some silence // just play some silence
if (ringBuffer->getEndOfLastWrite() != NULL) { if (ringBuffer->getEndOfLastWrite()) {
if (!ringBuffer->isStarted() && ringBuffer->diffLastWriteNextOutput() < PACKET_LENGTH_SAMPLES + JITTER_BUFFER_SAMPLES) { if (!ringBuffer->isStarted() && ringBuffer->diffLastWriteNextOutput() < PACKET_LENGTH_SAMPLES + JITTER_BUFFER_SAMPLES) {
//printLog("Held back, buffer has %d of %d samples required.\n", // printLog("Held back, buffer has %d of %d samples required.\n",
// ringBuffer->diffLastWriteNextOutput(), PACKET_LENGTH_SAMPLES + JITTER_BUFFER_SAMPLES); // ringBuffer->diffLastWriteNextOutput(), PACKET_LENGTH_SAMPLES + JITTER_BUFFER_SAMPLES);
} else if (ringBuffer->diffLastWriteNextOutput() < PACKET_LENGTH_SAMPLES) { } else if (ringBuffer->diffLastWriteNextOutput() < PACKET_LENGTH_SAMPLES) {
ringBuffer->setStarted(false); ringBuffer->setStarted(false);
@ -236,7 +229,7 @@ int audioCallback (const void* inputBuffer,
// we need to grab the flange sample from earlier in the buffer // we need to grab the flange sample from earlier in the buffer
flangeFrame = ringBuffer->getNextOutput() != ringBuffer->getBuffer() flangeFrame = ringBuffer->getNextOutput() != ringBuffer->getBuffer()
? ringBuffer->getNextOutput() - PACKET_LENGTH_SAMPLES ? ringBuffer->getNextOutput() - PACKET_LENGTH_SAMPLES
: ringBuffer->getNextOutput() + RING_BUFFER_SAMPLES - PACKET_LENGTH_SAMPLES; : ringBuffer->getNextOutput() + RING_BUFFER_LENGTH_SAMPLES - PACKET_LENGTH_SAMPLES;
flangeIndex = PACKET_LENGTH_SAMPLES_PER_CHANNEL + (s - sampleFlangeDelay); flangeIndex = PACKET_LENGTH_SAMPLES_PER_CHANNEL + (s - sampleFlangeDelay);
} }
@ -260,7 +253,7 @@ int audioCallback (const void* inputBuffer,
} }
ringBuffer->setNextOutput(ringBuffer->getNextOutput() + PACKET_LENGTH_SAMPLES); ringBuffer->setNextOutput(ringBuffer->getNextOutput() + PACKET_LENGTH_SAMPLES);
if (ringBuffer->getNextOutput() == ringBuffer->getBuffer() + RING_BUFFER_SAMPLES) { if (ringBuffer->getNextOutput() == ringBuffer->getBuffer() + RING_BUFFER_LENGTH_SAMPLES) {
ringBuffer->setNextOutput(ringBuffer->getBuffer()); ringBuffer->setNextOutput(ringBuffer->getBuffer());
} }
} }
@ -290,7 +283,7 @@ void outputPortAudioError(PaError error) {
Audio::Audio(Oscilloscope* scope) : Audio::Audio(Oscilloscope* scope) :
_stream(NULL), _stream(NULL),
_ringBuffer(RING_BUFFER_SAMPLES, PACKET_LENGTH_SAMPLES), _ringBuffer(true),
_scope(scope), _scope(scope),
_averagedLatency(0.0), _averagedLatency(0.0),
_measuredJitter(0), _measuredJitter(0),
@ -315,7 +308,7 @@ Audio::Audio(Oscilloscope* scope) :
2, 2,
(paInt16 | paNonInterleaved), (paInt16 | paNonInterleaved),
SAMPLE_RATE, SAMPLE_RATE,
BUFFER_LENGTH_SAMPLES, BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
audioCallback, audioCallback,
(void*) this)); (void*) this));
@ -324,8 +317,8 @@ Audio::Audio(Oscilloscope* scope) :
_echoInputSamples = new int16_t[BUFFER_LENGTH_BYTES]; _echoInputSamples = new int16_t[BUFFER_LENGTH_BYTES];
_echoOutputSamples = new int16_t[BUFFER_LENGTH_BYTES]; _echoOutputSamples = new int16_t[BUFFER_LENGTH_BYTES];
memset(_echoInputSamples, 0, BUFFER_LENGTH_SAMPLES * sizeof(int)); memset(_echoInputSamples, 0, BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int));
memset(_echoOutputSamples, 0, BUFFER_LENGTH_SAMPLES * sizeof(int)); memset(_echoOutputSamples, 0, BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int));
gettimeofday(&_lastReceiveTime, NULL); gettimeofday(&_lastReceiveTime, NULL);
} }
@ -347,13 +340,13 @@ void Audio::renderEchoCompare() {
glDisable(GL_LINE_SMOOTH); glDisable(GL_LINE_SMOOTH);
glColor3f(1,1,1); glColor3f(1,1,1);
glBegin(GL_LINE_STRIP); glBegin(GL_LINE_STRIP);
for (int i = 0; i < BUFFER_LENGTH_SAMPLES; i++) { for (int i = 0; i < BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
glVertex2f(XPOS + i * XSCALE, YPOS + _echoInputSamples[i]/YSCALE); glVertex2f(XPOS + i * XSCALE, YPOS + _echoInputSamples[i]/YSCALE);
} }
glEnd(); glEnd();
glColor3f(0,1,1); glColor3f(0,1,1);
glBegin(GL_LINE_STRIP); glBegin(GL_LINE_STRIP);
for (int i = 0; i < BUFFER_LENGTH_SAMPLES; i++) { for (int i = 0; i < BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
glVertex2f(XPOS + i * XSCALE, YPOS + _echoOutputSamples[i]/YSCALE); glVertex2f(XPOS + i * XSCALE, YPOS + _echoOutputSamples[i]/YSCALE);
} }
glEnd(); glEnd();
@ -468,7 +461,7 @@ void Audio::render(int screenWidth, int screenHeight) {
glVertex2f(currentX, topY); glVertex2f(currentX, topY);
glVertex2f(currentX, bottomY); glVertex2f(currentX, bottomY);
for (int i = 0; i < RING_BUFFER_FRAMES; i++) { for (int i = 0; i < RING_BUFFER_LENGTH_FRAMES; i++) {
glVertex2f(currentX, halfY); glVertex2f(currentX, halfY);
glVertex2f(currentX + frameWidth, halfY); glVertex2f(currentX + frameWidth, halfY);
currentX += frameWidth; currentX += frameWidth;

View file

@ -110,10 +110,10 @@ void AudioInjector::injectAudio(UDPSocket* injectorSocket, sockaddr* destination
gettimeofday(&startTime, NULL); gettimeofday(&startTime, NULL);
int nextFrame = 0; int nextFrame = 0;
for (int i = 0; i < _numTotalSamples; i += BUFFER_LENGTH_SAMPLES) { for (int i = 0; i < _numTotalSamples; i += BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
int numSamplesToCopy = BUFFER_LENGTH_SAMPLES; int numSamplesToCopy = BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
if (_numTotalSamples - i < BUFFER_LENGTH_SAMPLES) { if (_numTotalSamples - i < BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
numSamplesToCopy = _numTotalSamples - i; numSamplesToCopy = _numTotalSamples - i;
memset(currentPacketPtr + numSamplesToCopy, 0, BUFFER_LENGTH_BYTES - (numSamplesToCopy * sizeof(int16_t))); memset(currentPacketPtr + numSamplesToCopy, 0, BUFFER_LENGTH_BYTES - (numSamplesToCopy * sizeof(int16_t)));
} }
@ -122,7 +122,7 @@ void AudioInjector::injectAudio(UDPSocket* injectorSocket, sockaddr* destination
injectorSocket->send(destinationSocket, dataPacket, sizeof(dataPacket)); injectorSocket->send(destinationSocket, dataPacket, sizeof(dataPacket));
double usecToSleep = usecTimestamp(&startTime) + (++nextFrame * BUFFER_SEND_INTERVAL_USECS) - usecTimestampNow(); double usecToSleep = usecTimestamp(&startTime) + (++nextFrame * INJECT_INTERVAL_USECS) - usecTimestampNow();
if (usecToSleep > 0) { if (usecToSleep > 0) {
usleep(usecToSleep); usleep(usecToSleep);
} }

View file

@ -9,16 +9,18 @@
#ifndef __hifi__AudioInjector__ #ifndef __hifi__AudioInjector__
#define __hifi__AudioInjector__ #define __hifi__AudioInjector__
#include <iostream>
#include <glm/glm.hpp> #include <glm/glm.hpp>
#include <glm/gtx/quaternion.hpp>
#include <UDPSocket.h>
#include "AudioRingBuffer.h" #include "AudioRingBuffer.h"
const int BUFFER_LENGTH_BYTES = 512; const int STREAM_IDENTIFIER_NUM_BYTES = 8;
const int BUFFER_LENGTH_SAMPLES = BUFFER_LENGTH_BYTES / sizeof(int16_t); const char INJECT_AUDIO_AT_POINT_COMMAND = 'P';
const float SAMPLE_RATE = 22050.0f; const char INJECT_AUDIO_AT_CUBE_COMMAND = 'C';
const float BUFFER_SEND_INTERVAL_USECS = (BUFFER_LENGTH_SAMPLES / SAMPLE_RATE) * 1000000;
const float INJECT_INTERVAL_USECS = (BUFFER_LENGTH_SAMPLES_PER_CHANNEL / SAMPLE_RATE) * 1000000;
class AudioInjector { class AudioInjector {
public: public:

View file

@ -13,103 +13,61 @@
#include "AudioRingBuffer.h" #include "AudioRingBuffer.h"
AudioRingBuffer::AudioRingBuffer(int ringSamples, int bufferSamples) : AudioRingBuffer::AudioRingBuffer(bool isStereo) :
AgentData(NULL), AgentData(NULL),
_ringBufferLengthSamples(ringSamples),
_bufferLengthSamples(bufferSamples),
_radius(0.0f),
_endOfLastWrite(NULL), _endOfLastWrite(NULL),
_started(false), _isStarted(false),
_shouldBeAddedToMix(false), _isStereo(isStereo)
_shouldLoopbackForAgent(false),
_streamIdentifier()
{ {
_buffer = new int16_t[_ringBufferLengthSamples]; _buffer = new int16_t[RING_BUFFER_LENGTH_SAMPLES];
_nextOutput = _buffer; _nextOutput = _buffer;
}; };
AudioRingBuffer::~AudioRingBuffer() { AudioRingBuffer::~AudioRingBuffer() {
delete[] _buffer; delete[] _buffer;
};
const int AGENT_LOOPBACK_MODIFIER = 307;
int AudioRingBuffer::parseData(unsigned char* sourceBuffer, int numBytes) {
unsigned char* dataBuffer = sourceBuffer + 1;
if (sourceBuffer[0] == PACKET_HEADER_INJECT_AUDIO ||
sourceBuffer[0] == PACKET_HEADER_MICROPHONE_AUDIO) {
// if this came from an injector or interface client
// there's data required for spatialization to pull out
if (sourceBuffer[0] == PACKET_HEADER_INJECT_AUDIO) {
// we've got a stream identifier to pull from the packet
memcpy(&_streamIdentifier, dataBuffer, sizeof(_streamIdentifier));
dataBuffer += sizeof(_streamIdentifier);
// push past the injection command
dataBuffer += sizeof(INJECT_AUDIO_AT_POINT_COMMAND);
}
memcpy(&_position, dataBuffer, sizeof(_position));
dataBuffer += sizeof(_position);
if (sourceBuffer[0] == PACKET_HEADER_INJECT_AUDIO && sourceBuffer[1] == INJECT_AUDIO_AT_CUBE_COMMAND) {
// this is audio that needs to be injected as a volume (cube)
// parse out the cubeHalfHeight sent by the client
memcpy(&_radius, dataBuffer, sizeof(_radius));
dataBuffer += sizeof(_radius);
}
unsigned int attenuationByte = *(dataBuffer++);
_attenuationRatio = attenuationByte / 255.0f;
memcpy(&_orientation, dataBuffer, sizeof(_orientation));
dataBuffer += sizeof(_orientation);
// if this agent sent us a NaN for first float in orientation then don't consider this good audio and bail
if (std::isnan(_orientation.x)) {
_endOfLastWrite = _nextOutput = _buffer;
_started = false;
return 0;
} else {
// currently no possiblity for loopback, need to add once quaternion audio is working again
_shouldLoopbackForAgent = false;
}
}
// make sure we have enough bytes left for this to be the right amount of audio
// otherwise we should not copy that data, and leave the buffer pointers where they are
if (numBytes - (dataBuffer - sourceBuffer) == _bufferLengthSamples * sizeof(int16_t)) {
if (!_endOfLastWrite) {
_endOfLastWrite = _buffer;
} else if (diffLastWriteNextOutput() > _ringBufferLengthSamples - _bufferLengthSamples) {
_endOfLastWrite = _buffer;
_nextOutput = _buffer;
_started = false;
}
memcpy(_endOfLastWrite, dataBuffer, _bufferLengthSamples * sizeof(int16_t));
_endOfLastWrite += _bufferLengthSamples;
if (_endOfLastWrite >= _buffer + _ringBufferLengthSamples) {
_endOfLastWrite = _buffer;
}
}
return numBytes;
} }
short AudioRingBuffer::diffLastWriteNextOutput() { int AudioRingBuffer::parseData(unsigned char* sourceBuffer, int numBytes) {
return parseAudioSamples(sourceBuffer + sizeof(PACKET_HEADER_MIXED_AUDIO), numBytes - sizeof(PACKET_HEADER_MIXED_AUDIO));
}
int AudioRingBuffer::parseAudioSamples(unsigned char* sourceBuffer, int numBytes) {
// make sure we have enough bytes left for this to be the right amount of audio
// otherwise we should not copy that data, and leave the buffer pointers where they are
int samplesToCopy = BUFFER_LENGTH_SAMPLES_PER_CHANNEL * (_isStereo ? 2 : 1);
if (numBytes == samplesToCopy * sizeof(int16_t)) {
if (!_endOfLastWrite) {
_endOfLastWrite = _buffer;
} else if (diffLastWriteNextOutput() > RING_BUFFER_LENGTH_SAMPLES - samplesToCopy) {
_endOfLastWrite = _buffer;
_nextOutput = _buffer;
_isStarted = false;
}
memcpy(_endOfLastWrite, sourceBuffer, numBytes);
_endOfLastWrite += samplesToCopy;
if (_endOfLastWrite >= _buffer + RING_BUFFER_LENGTH_SAMPLES) {
_endOfLastWrite = _buffer;
}
return numBytes;
} else {
return 0;
}
}
int AudioRingBuffer::diffLastWriteNextOutput() const {
if (!_endOfLastWrite) { if (!_endOfLastWrite) {
return 0; return 0;
} else { } else {
short sampleDifference = _endOfLastWrite - _nextOutput; int sampleDifference = _endOfLastWrite - _nextOutput;
if (sampleDifference < 0) { if (sampleDifference < 0) {
sampleDifference += _ringBufferLengthSamples; sampleDifference += RING_BUFFER_LENGTH_SAMPLES;
} }
return sampleDifference; return sampleDifference;

View file

@ -13,26 +13,25 @@
#include <map> #include <map>
#include <glm/glm.hpp> #include <glm/glm.hpp>
#include <glm/gtx/quaternion.hpp>
#include <Stk.h>
#include <FreeVerb.h>
#include "AgentData.h" #include "AgentData.h"
const int STREAM_IDENTIFIER_NUM_BYTES = 8; const float SAMPLE_RATE = 22050.0;
typedef std::map<uint16_t, stk::FreeVerb*> FreeVerbAgentMap;
const char INJECT_AUDIO_AT_POINT_COMMAND = 'P'; const int BUFFER_LENGTH_BYTES = 1024;
const char INJECT_AUDIO_AT_CUBE_COMMAND = 'C'; const int BUFFER_LENGTH_SAMPLES_PER_CHANNEL = (BUFFER_LENGTH_BYTES / 2) / sizeof(int16_t);
const short RING_BUFFER_LENGTH_FRAMES = 10;
const short RING_BUFFER_LENGTH_SAMPLES = RING_BUFFER_LENGTH_FRAMES * BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
class AudioRingBuffer : public AgentData { class AudioRingBuffer : public AgentData {
public: public:
AudioRingBuffer(int ringSamples, int bufferSamples); AudioRingBuffer(bool isStereo);
~AudioRingBuffer(); ~AudioRingBuffer();
int parseData(unsigned char* sourceBuffer, int numBytes); int parseData(unsigned char* sourceBuffer, int numBytes);
int parseAudioSamples(unsigned char* sourceBuffer, int numBytes);
float getRadius() const { return _radius; }
int16_t* getNextOutput() const { return _nextOutput; } int16_t* getNextOutput() const { return _nextOutput; }
void setNextOutput(int16_t* nextOutput) { _nextOutput = nextOutput; } void setNextOutput(int16_t* nextOutput) { _nextOutput = nextOutput; }
@ -42,41 +41,20 @@ public:
int16_t* getBuffer() const { return _buffer; } int16_t* getBuffer() const { return _buffer; }
FreeVerbAgentMap& getFreeVerbs() { return _freeVerbs; } bool isStarted() const { return _isStarted; }
void setStarted(bool isStarted) { _isStarted = isStarted; }
bool isStarted() const { return _started; }
void setStarted(bool started) { _started = started; }
bool shouldBeAddedToMix() const { return _shouldBeAddedToMix; }
void setShouldBeAddedToMix(bool shouldBeAddedToMix) { _shouldBeAddedToMix = shouldBeAddedToMix; }
const glm::vec3& getPosition() const { return _position; }
const glm::quat& getOrientation() const { return _orientation; }
float getAttenuationRatio() const { return _attenuationRatio; }
bool shouldLoopbackForAgent() const { return _shouldLoopbackForAgent; }
const unsigned char* getStreamIdentifier() const { return _streamIdentifier; }
short diffLastWriteNextOutput(); int diffLastWriteNextOutput() const;
private: protected:
// disallow copying of AudioRingBuffer objects // disallow copying of AudioRingBuffer objects
AudioRingBuffer(const AudioRingBuffer&); AudioRingBuffer(const AudioRingBuffer&);
AudioRingBuffer& operator= (const AudioRingBuffer&); AudioRingBuffer& operator= (const AudioRingBuffer&);
int _ringBufferLengthSamples;
int _bufferLengthSamples;
glm::vec3 _position;
glm::quat _orientation;
float _radius;
float _attenuationRatio;
int16_t* _nextOutput; int16_t* _nextOutput;
int16_t* _endOfLastWrite; int16_t* _endOfLastWrite;
int16_t* _buffer; int16_t* _buffer;
bool _started; bool _isStarted;
bool _shouldBeAddedToMix; bool _isStereo;
bool _shouldLoopbackForAgent;
unsigned char _streamIdentifier[STREAM_IDENTIFIER_NUM_BYTES];
FreeVerbAgentMap _freeVerbs;
}; };
#endif /* defined(__interface__AudioRingBuffer__) */ #endif /* defined(__interface__AudioRingBuffer__) */