Mirror of https://github.com/overte-org/overte.git, synced 2025-08-09 21:12:53 +02:00
Merge pull request #497 from birarda/synthesis

remove the CPU hog that is FreeVerb for now

Commit 3fda876c91
4 changed files with 3 additions and 74 deletions
AvatarAudioRingBuffer implementation:

@@ -11,18 +11,10 @@
 #include "AvatarAudioRingBuffer.h"
 
 AvatarAudioRingBuffer::AvatarAudioRingBuffer() :
-    _freeVerbs(),
     _shouldLoopbackForAgent(false) {
     
 }
 
-AvatarAudioRingBuffer::~AvatarAudioRingBuffer() {
-    // enumerate the freeVerbs map and delete the FreeVerb objects
-    for (FreeVerbAgentMap::iterator verbIterator = _freeVerbs.begin(); verbIterator != _freeVerbs.end(); verbIterator++) {
-        delete verbIterator->second;
-    }
-}
-
 int AvatarAudioRingBuffer::parseData(unsigned char* sourceBuffer, int numBytes) {
     _shouldLoopbackForAgent = (sourceBuffer[0] == PACKET_HEADER_MICROPHONE_AUDIO_WITH_ECHO);
     return PositionalAudioRingBuffer::parseData(sourceBuffer, numBytes);
AvatarAudioRingBuffer header:

@@ -9,28 +9,20 @@
 #ifndef __hifi__AvatarAudioRingBuffer__
 #define __hifi__AvatarAudioRingBuffer__
 
-#include <Stk.h>
-#include <FreeVerb.h>
-
 #include "PositionalAudioRingBuffer.h"
 
-typedef std::map<uint16_t, stk::FreeVerb*> FreeVerbAgentMap;
-
 class AvatarAudioRingBuffer : public PositionalAudioRingBuffer {
 public:
     AvatarAudioRingBuffer();
-    ~AvatarAudioRingBuffer();
     
     int parseData(unsigned char* sourceBuffer, int numBytes);
     
-    FreeVerbAgentMap& getFreeVerbs() { return _freeVerbs; }
     bool shouldLoopbackForAgent() const { return _shouldLoopbackForAgent; }
 private:
     // disallow copying of AvatarAudioRingBuffer objects
     AvatarAudioRingBuffer(const AvatarAudioRingBuffer&);
     AvatarAudioRingBuffer& operator= (const AvatarAudioRingBuffer&);
     
-    FreeVerbAgentMap _freeVerbs;
     bool _shouldLoopbackForAgent;
 };
 
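A note on the destructor that disappears above: _freeVerbs mapped agent IDs to raw stk::FreeVerb pointers owned by the ring buffer, so AvatarAudioRingBuffer needed a hand-written destructor only to delete those entries. With the map gone, the compiler-generated destructor is enough, which is why both the declaration and the definition go away. A minimal, self-contained sketch of that raw-pointer-map ownership idiom (stand-in types, not project code):

    #include <stdint.h>
    #include <map>

    // Stand-in for stk::FreeVerb so the sketch builds without STK.
    struct Reverb {};
    typedef std::map<uint16_t, Reverb*> ReverbMap;

    class Owner {
    public:
        ~Owner() {
            // the same manual cleanup the removed ~AvatarAudioRingBuffer() performed
            for (ReverbMap::iterator it = _reverbs.begin(); it != _reverbs.end(); ++it) {
                delete it->second;
            }
        }
        ReverbMap& getReverbs() { return _reverbs; }
    private:
        ReverbMap _reverbs;
    };

    int main() {
        Owner owner;
        owner.getReverbs()[42] = new Reverb(); // one reverb per remote agent ID
        return 0; // ~Owner() frees every entry on destruction
    }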
Audio mixer main():

@@ -104,11 +104,6 @@ int main(int argc, const char* argv[]) {
 
     int16_t clientSamples[BUFFER_LENGTH_SAMPLES_PER_CHANNEL * 2] = {};
     
-    // setup STK for the reverb effect
-    const float DISTANCE_REVERB_DAMPING = 0.6f;
-    const float DISTANCE_REVERB_ROOM_SIZE = 0.75f;
-    const float DISTANCE_REVERB_WIDTH = 0.5f;
-    
     gettimeofday(&startTime, NULL);
     
     while (true) {
@@ -129,8 +124,6 @@ int main(int argc, const char* argv[]) {
         // zero out the client mix for this agent
         memset(clientSamples, 0, sizeof(clientSamples));
         
-        const int PHASE_DELAY_AT_90 = 20;
-        
         for (AgentList::iterator otherAgent = agentList->begin(); otherAgent != agentList->end(); otherAgent++) {
             if (((PositionalAudioRingBuffer*) otherAgent->getLinkedData())->willBeAddedToMix()
                 && (otherAgent != agent || (otherAgent == agent && agentRingBuffer->shouldLoopbackForAgent()))) {
@@ -142,8 +135,6 @@ int main(int argc, const char* argv[]) {
                 int numSamplesDelay = 0;
                 float weakChannelAmplitudeRatio = 1.0f;
                 
-                stk::FreeVerb* otherAgentFreeVerb = NULL;
-                
                 if (otherAgent != agent) {
                     
                     glm::vec3 listenerPosition = agentRingBuffer->getPosition();
@@ -212,6 +203,7 @@ int main(int argc, const char* argv[]) {
                                                          glm::normalize(rotatedSourcePosition),
                                                          glm::vec3(0.0f, 1.0f, 0.0f));
                     
+                    const int PHASE_DELAY_AT_90 = 20;
                     const float PHASE_AMPLITUDE_RATIO_AT_90 = 0.5;
                     
                     // figure out the number of samples of delay and the ratio of the amplitude
@@ -220,40 +212,6 @@ int main(int argc, const char* argv[]) {
                         numSamplesDelay = PHASE_DELAY_AT_90 * sinRatio;
                         weakChannelAmplitudeRatio = 1 - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);
                     }
-                    
-                    FreeVerbAgentMap& agentFreeVerbs = agentRingBuffer->getFreeVerbs();
-                    FreeVerbAgentMap::iterator freeVerbIterator = agentFreeVerbs.find(otherAgent->getAgentID());
-                    
-                    if (freeVerbIterator == agentFreeVerbs.end()) {
-                        // setup the freeVerb effect for this source for this client
-                        otherAgentFreeVerb = agentFreeVerbs[otherAgent->getAgentID()] = new stk::FreeVerb;
-                        
-                        otherAgentFreeVerb->setDamping(DISTANCE_REVERB_DAMPING);
-                        otherAgentFreeVerb->setRoomSize(DISTANCE_REVERB_ROOM_SIZE);
-                        otherAgentFreeVerb->setWidth(DISTANCE_REVERB_WIDTH);
-                    } else {
-                        otherAgentFreeVerb = freeVerbIterator->second;
-                    }
-                    
-                    const float WETNESS_DOUBLING_DISTANCE_FACTOR = 2.0f;
-                    const float MAX_REVERB_DISTANCE = 160.0f;
-                    
-                    // higher value increases wetness more quickly with distance
-                    const float WETNESS_CALC_EXPONENT_BASE = 2.0f;
-                    
-                    const float MAX_EXPONENT = logf(MAX_REVERB_DISTANCE) / logf(WETNESS_DOUBLING_DISTANCE_FACTOR);
-                    const int MAX_EXPONENT_INT = floorf(MAX_EXPONENT);
-                    const float DISTANCE_REVERB_LOG_REMAINDER = fmodf(MAX_EXPONENT, MAX_EXPONENT_INT);
-                    const float DISTANCE_REVERB_MAX_WETNESS = 1.0f;
-                    const float EFFECT_MIX_RHS = DISTANCE_REVERB_MAX_WETNESS / powf(WETNESS_DOUBLING_DISTANCE_FACTOR,
-                                                                                    MAX_EXPONENT_INT);
-                    
-                    float effectMix = powf(WETNESS_CALC_EXPONENT_BASE,
-                                           (0.5f * logf(distanceSquareToSource) / logf(WETNESS_CALC_EXPONENT_BASE))
-                                           - DISTANCE_REVERB_LOG_REMAINDER);
-                    effectMix *= EFFECT_MIX_RHS;
-                    
-                    otherAgentFreeVerb->setEffectMix(effectMix);
                 }
                 
                 int16_t* goodChannel = (bearingRelativeAngleToSource > 0.0f)
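If I am reading the wetness arithmetic removed in the hunk above correctly, the constants collapse: MAX_EXPONENT is log2(160), roughly 7.32, so MAX_EXPONENT_INT plus DISTANCE_REVERB_LOG_REMAINDER adds back up to log2(MAX_REVERB_DISTANCE), and the whole expression reduces to effectMix = sqrt(distanceSquareToSource) / MAX_REVERB_DISTANCE, a reverb wetness that ramps linearly from 0 at the listener to 1.0 at 160 units. A small standalone check of that reading (constants copied from the removed lines, the harness around them is mine):

    #include <math.h>
    #include <stdio.h>

    int main() {
        // constants from the removed mixer code
        const float WETNESS_DOUBLING_DISTANCE_FACTOR = 2.0f;
        const float MAX_REVERB_DISTANCE = 160.0f;
        const float WETNESS_CALC_EXPONENT_BASE = 2.0f;

        const float MAX_EXPONENT = logf(MAX_REVERB_DISTANCE) / logf(WETNESS_DOUBLING_DISTANCE_FACTOR);
        const int MAX_EXPONENT_INT = floorf(MAX_EXPONENT);
        const float DISTANCE_REVERB_LOG_REMAINDER = fmodf(MAX_EXPONENT, MAX_EXPONENT_INT);
        const float DISTANCE_REVERB_MAX_WETNESS = 1.0f;
        const float EFFECT_MIX_RHS = DISTANCE_REVERB_MAX_WETNESS / powf(WETNESS_DOUBLING_DISTANCE_FACTOR,
                                                                        MAX_EXPONENT_INT);

        for (float distance = 10.0f; distance <= MAX_REVERB_DISTANCE; distance *= 2.0f) {
            float distanceSquareToSource = distance * distance;

            // the expression the mixer evaluated per source, per the removed lines
            float effectMix = powf(WETNESS_CALC_EXPONENT_BASE,
                                   (0.5f * logf(distanceSquareToSource) / logf(WETNESS_CALC_EXPONENT_BASE))
                                   - DISTANCE_REVERB_LOG_REMAINDER);
            effectMix *= EFFECT_MIX_RHS;

            printf("distance %6.1f  effectMix %.4f  distance / 160 %.4f\n",
                   distance, effectMix, distance / MAX_REVERB_DISTANCE);
        }
        return 0;
    }

Whether that linear ramp was the intent or just how the constants happened to work out is not something the diff alone can answer.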
@@ -278,20 +236,7 @@ int main(int argc, const char* argv[]) {
                         plateauAdditionOfSamples(delayedChannel[s], earlierSample);
                     }
                     
-                    int16_t currentSample = otherAgentBuffer->getNextOutput()[s];
+                    int16_t currentSample = otherAgentBuffer->getNextOutput()[s] * attenuationCoefficient;
                     
-                    // apply the STK FreeVerb effect
-                    if (otherAgentFreeVerb) {
-                        currentSample = otherAgentFreeVerb->tick(currentSample);
-                        
-                        if (s >= BUFFER_LENGTH_SAMPLES_PER_CHANNEL - PHASE_DELAY_AT_90) {
-                            // there is the possiblity this will be re-used as a delayed sample
-                            // so store the reverbed sample so that is what will be pulled
-                            otherAgentBuffer->getNextOutput()[s] = currentSample;
-                        }
-                    }
-                    
-                    currentSample *= attenuationCoefficient;
-                    
                     plateauAdditionOfSamples(goodChannel[s], currentSample);
 
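This hunk is where the cost the commit message calls out actually lived: otherAgentFreeVerb->tick() ran once per sample, for every source agent, for every listening agent, inside the mixer's main while loop, and (per the map removed from the header above) each listener's ring buffer held its own stk::FreeVerb for every other agent. With N agents and S samples per channel per mix pass that is on the order of N * (N - 1) * S tick() calls per pass. A purely illustrative count, with both numbers below made up rather than taken from the codebase:

    #include <stdio.h>

    int main() {
        const int N = 20;                  // agents in the mix (hypothetical)
        const int SAMPLES_PER_FRAME = 512; // per-channel samples per mix pass (hypothetical)

        // one FreeVerb per (listener, source) pair, ticked once per sample
        long ticksPerPass = (long) N * (N - 1) * SAMPLES_PER_FRAME;
        printf("%ld FreeVerb::tick() calls per mix pass\n", ticksPerPass);
        return 0;
    }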
Interface audio callback:

@@ -129,7 +129,7 @@ int audioCallback (const void* inputBuffer,
     glm::vec3 headPosition = interfaceAvatar->getHeadJointPosition();
     glm::quat headOrientation = interfaceAvatar->getHead().getOrientation();
     
-    int leadingBytes = 1 + sizeof(headPosition) + sizeof(headOrientation);
+    int leadingBytes = sizeof(PACKET_HEADER_MICROPHONE_AUDIO_NO_ECHO) + sizeof(headPosition) + sizeof(headOrientation);
     
     // we need the amount of bytes in the buffer + 1 for type
     // + 12 for 3 floats for position + float for bearing + 1 attenuation byte
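The one-line client change above swaps a hard-coded 1 for sizeof applied to the packet header constant. The two are equivalent as long as PACKET_HEADER_MICROPHONE_AUDIO_NO_ECHO is a single char, which is how I read the old "1 +", and the new form keeps the byte count tied to the header's actual type. The comment beneath it still describes an older position-plus-bearing layout even though the code now sends a position and a quaternion orientation. A small sketch of the size arithmetic (the header value here is a placeholder, not the project's real constant):

    #include <stdio.h>
    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    // Placeholder: assumes the packet header is a single char, as the old "1 +" implied.
    const char PACKET_HEADER_MICROPHONE_AUDIO_NO_ECHO = 'X';

    int main() {
        glm::vec3 headPosition(0.0f);
        glm::quat headOrientation(1.0f, 0.0f, 0.0f, 0.0f);

        int oldLeadingBytes = 1 + sizeof(headPosition) + sizeof(headOrientation);
        int newLeadingBytes = sizeof(PACKET_HEADER_MICROPHONE_AUDIO_NO_ECHO)
                              + sizeof(headPosition) + sizeof(headOrientation);

        // both come out to 1 + 12 + 16 = 29 bytes of leading data before the samples
        printf("old = %d, new = %d\n", oldLeadingBytes, newLeadingBytes);
        return 0;
    }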