latest work

This commit is contained in:
ZappoMan 2014-04-03 08:55:48 -07:00
parent 09bcada263
commit 3808cfa83d
6 changed files with 78 additions and 34 deletions

View file

@ -14,12 +14,12 @@
<translation type="unfinished"></translation>
</message>
<message>
<location filename="src/Application.cpp" line="3714"/>
<location filename="src/Application.cpp" line="3732"/>
<source>Open Script</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="src/Application.cpp" line="3715"/>
<location filename="src/Application.cpp" line="3733"/>
<source>JavaScript Files (*.js)</source>
<translation type="unfinished"></translation>
</message>
@ -113,18 +113,18 @@
<context>
<name>Menu</name>
<message>
<location filename="src/Menu.cpp" line="463"/>
<location filename="src/Menu.cpp" line="468"/>
<source>Open .ini config file</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="src/Menu.cpp" line="465"/>
<location filename="src/Menu.cpp" line="477"/>
<location filename="src/Menu.cpp" line="470"/>
<location filename="src/Menu.cpp" line="482"/>
<source>Text files (*.ini)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="src/Menu.cpp" line="475"/>
<location filename="src/Menu.cpp" line="480"/>
<source>Save .ini config file</source>
<translation type="unfinished"></translation>
</message>

View file

@ -1603,7 +1603,7 @@ void Application::init() {
_audioReflector.setMyAvatar(getAvatar());
_audioReflector.setVoxels(_voxels.getTree());
_audioReflector.setAudio(getAudio());
connect(getAudio(), &Audio::processSpatialAudio, &_audioReflector, &AudioReflector::processSpatialAudio);
connect(getAudio(), &Audio::processSpatialAudio, &_audioReflector, &AudioReflector::processSpatialAudio,Qt::DirectConnection);
}
void Application::closeMirrorView() {

View file

@ -622,10 +622,11 @@ unsigned int Audio::timeValToSampleTick(const quint64 time, int sampleRate) {
return sample;
}
void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, AudioRingBuffer& spatialAudio) {
void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const AudioRingBuffer& spatialAudio) {
// Calculate the number of remaining samples available
unsigned int remaining = _spatialAudioRingBuffer.getSampleCapacity() - _spatialAudioRingBuffer.samplesAvailable();
if (sampleTime >= _spatialAudioFinish) {
if (_spatialAudioStart == _spatialAudioFinish) {
@ -645,7 +646,7 @@ void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, AudioRingBuffer& sp
unsigned int ct = delay * _desiredOutputFormat.channelCount();
unsigned int silentCt = (remaining < ct) ? remaining : ct;
if (silentCt) {
_spatialAudioRingBuffer.addSilentFrame(silentCt);
_spatialAudioRingBuffer.addSilentFrame(silentCt);
}
// Recalculate the number of remaining samples
@ -660,11 +661,13 @@ void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, AudioRingBuffer& sp
}
} else {
// There is overlap between the spatial audio buffer and the new sample,
// accumulate the overlap
unsigned int offset = (sampleTime - _spatialAudioStart) * _desiredOutputFormat.channelCount();
unsigned int accumulationCt = (_spatialAudioFinish - sampleTime) * _desiredOutputFormat.channelCount();
accumulationCt = (accumulationCt < spatialAudio.samplesAvailable()) ? accumulationCt : spatialAudio.samplesAvailable();
int j = 0;
for (int i = accumulationCt; --i >= 0; j++) {
_spatialAudioRingBuffer[j + offset] += spatialAudio[j];
@ -734,7 +737,7 @@ void Audio::processReceivedAudio(unsigned int sampleTime, AudioRingBuffer& ringB
unsigned int sampleTime = _spatialAudioStart;
// Accumulate direct transmission of audio from sender to receiver
addSpatialAudioToBuffer(sampleTime, ringBuffer);
addSpatialAudioToBuffer(sampleTime + 48000, ringBuffer);
//addSpatialAudioToBuffer(sampleTime + 48000, ringBuffer);
// Send audio off for spatial processing
emit processSpatialAudio(sampleTime, QByteArray((char*)ringBuffer.getBuffer(), numNetworkOutputSamples), _desiredOutputFormat);
@ -744,6 +747,7 @@ void Audio::processReceivedAudio(unsigned int sampleTime, AudioRingBuffer& ringB
_spatialAudioRingBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
int samples = ringBuffer.samplesAvailable();
_spatialAudioStart += samples / _desiredOutputFormat.channelCount();
ringBuffer.reset();
} else {

View file

@ -78,7 +78,7 @@ public slots:
void start();
void stop();
void addReceivedAudioToBuffer(const QByteArray& audioByteArray);
void addSpatialAudioToBuffer(unsigned int sampleTime, AudioRingBuffer& spatialAudio);
void addSpatialAudioToBuffer(unsigned int sampleTime, const AudioRingBuffer& spatialAudio);
void handleAudioInput();
void reset();
void toggleMute();

View file

@ -37,7 +37,7 @@ int getDelayFromDistance(float distance) {
return MS_DELAY_PER_METER * distance;
}
const float BOUNCE_ATTENUATION_FACTOR = 0.5f;
const float BOUNCE_ATTENUATION_FACTOR = 0.125f;
float getDistanceAttenuationCoefficient(float distance) {
const float DISTANCE_SCALE = 2.5f;
@ -99,6 +99,9 @@ void AudioReflector::drawReflections(const glm::vec3& origin, const glm::vec3& o
}
}
// set up our buffers for our attenuated and delayed samples
const int NUMBER_OF_CHANNELS = 2;
void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::vec3& originalDirection,
int bounces, const QByteArray& originalSamples,
@ -115,14 +118,20 @@ void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::ve
BoxFace face;
const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point
// set up our buffers for our attenuated and delayed samples
const int NUMBER_OF_CHANNELS = 2;
int totalNumberOfSamples = originalSamples.size() / (sizeof(int16_t) * NUMBER_OF_CHANNELS);
int totalNumberOfSamples = originalSamples.size() / sizeof(int16_t);
int totalNumberOfStereoSamples = originalSamples.size() / (sizeof(int16_t) * NUMBER_OF_CHANNELS);
const int16_t* originalSamplesData = (const int16_t*)originalSamples.constData();
AudioRingBuffer attenuatedLeftSamples(totalNumberOfSamples);
AudioRingBuffer attenuatedRightSamples(totalNumberOfSamples);
QByteArray attenuatedLeftSamples;
QByteArray attenuatedRightSamples;
attenuatedLeftSamples.resize(originalSamples.size());
attenuatedRightSamples.resize(originalSamples.size());
int16_t* attenuatedLeftSamplesData = (int16_t*)attenuatedLeftSamples.data();
int16_t* attenuatedRightSamplesData = (int16_t*)attenuatedRightSamples.data();
AudioRingBuffer attenuatedLeftBuffer(totalNumberOfSamples);
AudioRingBuffer attenuatedRightBuffer(totalNumberOfSamples);
for (int bounceNumber = 1; bounceNumber <= bounces; bounceNumber++) {
@ -138,26 +147,25 @@ void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::ve
float leftEarDistance = glm::distance(end, leftEarPosition);
int rightEarDelayMsecs = getDelayFromDistance(rightEarDistance);
int leftEarDelayMsecs = getDelayFromDistance(leftEarDistance);
int rightEarDelay = rightEarDelayMsecs / MSECS_PER_SECOND * sampleRate;
int leftEarDelay = leftEarDelayMsecs / MSECS_PER_SECOND * sampleRate;
int rightEarDelay = rightEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
int leftEarDelay = leftEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
float rightEarAttenuation = getDistanceAttenuationCoefficient(rightEarDistance) *
(bounceNumber * BOUNCE_ATTENUATION_FACTOR);
float leftEarAttenuation = getDistanceAttenuationCoefficient(leftEarDistance) *
(bounceNumber * BOUNCE_ATTENUATION_FACTOR);
float rightEarAttenuation = getDistanceAttenuationCoefficient(rightEarDistance) * (bounceNumber * BOUNCE_ATTENUATION_FACTOR);
float leftEarAttenuation = getDistanceAttenuationCoefficient(leftEarDistance) * (bounceNumber * BOUNCE_ATTENUATION_FACTOR);
//qDebug() << "leftEarAttenuation=" << leftEarAttenuation << "rightEarAttenuation=" << rightEarAttenuation;
// run through the samples, and attenuate them
for (int sample = 0; sample < totalNumberOfSamples; sample++) {
for (int sample = 0; sample < totalNumberOfStereoSamples; sample++) {
int16_t leftSample = originalSamplesData[sample * NUMBER_OF_CHANNELS];
int16_t rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1];
//qDebug() << "leftSample=" << leftSample << "rightSample=" << rightSample;
attenuatedLeftSamples[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation;
attenuatedLeftSamples[sample * NUMBER_OF_CHANNELS + 1] = 0;
attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation;
attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0;
attenuatedRightSamples[sample * NUMBER_OF_CHANNELS] = 0;
attenuatedRightSamples[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation;
attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS] = 0;
attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation;
//qDebug() << "attenuated... leftSample=" << (leftSample * leftEarAttenuation) << "rightSample=" << (rightSample * rightEarAttenuation);
@ -169,15 +177,47 @@ void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::ve
unsigned int sampleTimeLeft = sampleTime + leftEarDelay;
unsigned int sampleTimeRight = sampleTime + rightEarDelay;
qDebug() << "sampleTimeLeft=" << sampleTimeLeft << "sampleTimeRight=" << sampleTimeRight;
//qDebug() << "sampleTimeLeft=" << sampleTimeLeft << "sampleTimeRight=" << sampleTimeRight;
attenuatedLeftBuffer.writeSamples(attenuatedLeftSamplesData, totalNumberOfSamples);
attenuatedRightBuffer.writeSamples(attenuatedRightSamplesData, totalNumberOfSamples);
_audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples);
_audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples);
_audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftBuffer);
_audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightBuffer);
attenuatedLeftBuffer.reset();
attenuatedRightBuffer.reset();
}
}
}
void AudioReflector::processSpatialAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
//qDebug() << "AudioReflector::processSpatialAudio()...sampleTime=" << sampleTime << " threadID=" << QThread::currentThreadId();
/*
int totalNumberOfSamples = samples.size() / (sizeof(int16_t));
int numFrameSamples = format.sampleRate() * format.channelCount();
qDebug() << " totalNumberOfSamples=" << totalNumberOfSamples;
qDebug() << " numFrameSamples=" << numFrameSamples;
qDebug() << " samples.size()=" << samples.size();
qDebug() << " sizeof(int16_t)=" << sizeof(int16_t);
AudioRingBuffer samplesRingBuffer(totalNumberOfSamples);
qint64 bytesCopied = samplesRingBuffer.writeData(samples.constData(),samples.size());
for(int i = 0; i < totalNumberOfSamples; i++) {
samplesRingBuffer[i] = samplesRingBuffer[i] * 0.25f;
}
qDebug() << " bytesCopied=" << bytesCopied;
_audio->addSpatialAudioToBuffer(sampleTime + 12000, samplesRingBuffer);
return;
*/
quint64 start = usecTimestampNow();
glm::vec3 origin = _myAvatar->getHead()->getPosition();

View file

@ -47,8 +47,8 @@ public:
int parseData(const QByteArray& packet);
// assume callers using this will never wrap around the end
const int16_t* getNextOutput() { return _nextOutput; }
const int16_t* getBuffer() { return _buffer; }
const int16_t* getNextOutput() const { return _nextOutput; }
const int16_t* getBuffer() const { return _buffer; }
qint64 readSamples(int16_t* destination, qint64 maxSamples);
qint64 writeSamples(const int16_t* source, qint64 maxSamples);