Merge pull request #9125 from kencooke/audio-ambisonics

First-order Ambisonic renderer
David Kelly 2016-12-12 11:36:12 -08:00 committed by GitHub
commit 8719d36cd9
15 changed files with 20386 additions and 52 deletions


@@ -1098,13 +1098,37 @@ void AudioClient::mixLocalAudioInjectors(float* mixBuffer) {
for (AudioInjector* injector : getActiveLocalAudioInjectors()) {
if (injector->getLocalBuffer()) {
qint64 samplesToRead = injector->isStereo() ? AudioConstants::NETWORK_FRAME_BYTES_STEREO : AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
static const int HRTF_DATASET_INDEX = 1;
// get one frame from the injector (mono or stereo)
memset(_scratchBuffer, 0, sizeof(_scratchBuffer));
if (0 < injector->getLocalBuffer()->readData((char*)_scratchBuffer, samplesToRead)) {
int numChannels = injector->isAmbisonic() ? AudioConstants::AMBISONIC : (injector->isStereo() ? AudioConstants::STEREO : AudioConstants::MONO);
qint64 bytesToRead = numChannels * AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
// get one frame from the injector
memset(_scratchBuffer, 0, bytesToRead);
if (0 < injector->getLocalBuffer()->readData((char*)_scratchBuffer, bytesToRead)) {
if (injector->isStereo()) {
if (injector->isAmbisonic()) {
// no distance attenuation
float gain = injector->getVolume();
//
// Calculate the soundfield orientation relative to the listener.
// Injector orientation can be used to align a recording to our world coordinates.
//
glm::quat relativeOrientation = injector->getOrientation() * glm::inverse(_orientationGetter());
// convert from Y-up (OpenGL) to Z-up (Ambisonic) coordinate system
float qw = relativeOrientation.w;
float qx = -relativeOrientation.z;
float qy = -relativeOrientation.x;
float qz = relativeOrientation.y;
// Ambisonic gets spatialized into mixBuffer
injector->getLocalFOA().render(_scratchBuffer, mixBuffer, HRTF_DATASET_INDEX,
qw, qx, qy, qz, gain, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
} else if (injector->isStereo()) {
// stereo gets directly mixed into mixBuffer
for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i++) {
@@ -1120,7 +1144,8 @@ void AudioClient::mixLocalAudioInjectors(float* mixBuffer) {
float azimuth = azimuthForSource(relativePosition);
// mono gets spatialized into mixBuffer
injector->getLocalHRTF().render(_scratchBuffer, mixBuffer, 1, azimuth, distance, gain, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
injector->getLocalHRTF().render(_scratchBuffer, mixBuffer, HRTF_DATASET_INDEX,
azimuth, distance, gain, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
}
} else {
@@ -1225,8 +1250,7 @@ void AudioClient::setIsStereoInput(bool isStereoInput) {
}
}
bool AudioClient::outputLocalInjector(bool isStereo, AudioInjector* injector) {
bool AudioClient::outputLocalInjector(AudioInjector* injector) {
Lock lock(_injectorsMutex);
if (injector->getLocalBuffer() && _audioInput ) {
// just add it to the vector of active local injectors, if
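
The mixLocalAudioInjectors() hunk above hands AudioFOA::render() a listener-relative orientation after shuffling the quaternion components from the engine's Y-up (OpenGL) frame into the Z-up frame ambisonics uses. A minimal sketch of the same axis remap, not part of this commit, assuming the conventional ambisonic frame of X forward, Y left, Z up; the function names are illustrative:

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    // Ambisonic X (forward) = -OpenGL Z, Y (left) = -OpenGL X, Z (up) = OpenGL Y.
    glm::vec3 yUpToAmbisonic(const glm::vec3& v) {
        return glm::vec3(-v.z, -v.x, v.y);
    }

    // The same component shuffle the mixer applies to relativeOrientation
    // before calling AudioFOA::render(); glm::quat takes (w, x, y, z).
    glm::quat yUpToAmbisonic(const glm::quat& q) {
        return glm::quat(q.w, -q.z, -q.x, q.y);
    }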


@@ -169,7 +169,7 @@ public slots:
int setOutputBufferSize(int numFrames, bool persist = true);
bool outputLocalInjector(bool isStereo, AudioInjector* injector) override;
bool outputLocalInjector(AudioInjector* injector) override;
bool shouldLoopbackInjectors() override { return _shouldEchoToServer; }
bool switchInputToAudioDevice(const QString& inputDeviceName);
@@ -297,7 +297,7 @@ private:
// for local hrtf-ing
float _mixBuffer[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
int16_t _scratchBuffer[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
int16_t _scratchBuffer[AudioConstants::NETWORK_FRAME_SAMPLES_AMBISONIC];
AudioLimiter _audioLimiter;
// Adds Reverb


@@ -32,7 +32,7 @@ public:
PacketType packetType, QString codecName = QString(""));
public slots:
virtual bool outputLocalInjector(bool isStereo, AudioInjector* injector) = 0;
virtual bool outputLocalInjector(AudioInjector* injector) = 0;
virtual bool shouldLoopbackInjectors() { return false; }
virtual void setIsStereoInput(bool stereo) = 0;


@@ -20,7 +20,7 @@ namespace AudioConstants {
const int SAMPLE_RATE = 24000;
const int MONO = 1;
const int STEREO = 2;
const int AMBISONIC = 4;
typedef int16_t AudioSample;
const int SAMPLE_SIZE = sizeof(AudioSample);
@@ -33,6 +33,7 @@ namespace AudioConstants {
const int NETWORK_FRAME_SAMPLES_STEREO = NETWORK_FRAME_BYTES_STEREO / SAMPLE_SIZE;
const int NETWORK_FRAME_BYTES_PER_CHANNEL = NETWORK_FRAME_BYTES_STEREO / 2;
const int NETWORK_FRAME_SAMPLES_PER_CHANNEL = NETWORK_FRAME_BYTES_PER_CHANNEL / SAMPLE_SIZE;
const int NETWORK_FRAME_SAMPLES_AMBISONIC = NETWORK_FRAME_SAMPLES_PER_CHANNEL * AMBISONIC;
const float NETWORK_FRAME_SECS = (AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL / float(AudioConstants::SAMPLE_RATE));
const float NETWORK_FRAME_MSECS = NETWORK_FRAME_SECS * 1000.0f;
const float NETWORK_FRAMES_PER_SEC = 1.0f / NETWORK_FRAME_SECS;
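
A quick check of the sizes these new constants imply, and why _scratchBuffer in AudioClient.h grows from NETWORK_FRAME_SAMPLES_STEREO to NETWORK_FRAME_SAMPLES_AMBISONIC. This sketch assumes the existing 240-sample (10 ms) network frame at 24 kHz; that per-channel value is not shown in this hunk but matches FOA_BLOCK in AudioFOA.h below:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int SAMPLE_RATE = 24000;
        const int SAMPLE_SIZE = sizeof(int16_t);                    // 2 bytes per sample
        const int NETWORK_FRAME_SAMPLES_PER_CHANNEL = 240;          // assumed (10 ms frame)
        const int AMBISONIC = 4;

        int ambisonicSamples = NETWORK_FRAME_SAMPLES_PER_CHANNEL * AMBISONIC;  // 960
        int ambisonicBytes = ambisonicSamples * SAMPLE_SIZE;                   // 1920
        float frameMsecs = 1000.0f * NETWORK_FRAME_SAMPLES_PER_CHANNEL / SAMPLE_RATE;

        printf("%d samples, %d bytes, %.1f ms per ambisonic frame\n",
               ambisonicSamples, ambisonicBytes, frameMsecs);   // 960 samples, 1920 bytes, 10.0 ms
        return 0;
    }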

File diff suppressed because it is too large


@@ -0,0 +1,62 @@
//
// AudioFOA.h
// libraries/audio/src
//
// Created by Ken Cooke on 10/28/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioFOA_h
#define hifi_AudioFOA_h
#include <stdint.h>
static const int FOA_TAPS = 273; // FIR coefs
static const int FOA_NFFT = 512; // FFT length
static const int FOA_OVERLAP = FOA_TAPS - 1;
static const int FOA_TABLES = 25; // number of HRTF subjects
static const int FOA_BLOCK = 240; // block processing size
static const float FOA_GAIN = 1.0f; // FOA global gain adjustment
static_assert((FOA_BLOCK + FOA_OVERLAP) == FOA_NFFT, "FFT convolution requires L+M-1 == NFFT");
class AudioFOA {
public:
AudioFOA() {
// identity matrix
_rotationState[0][0] = 1.0f;
_rotationState[1][1] = 1.0f;
_rotationState[2][2] = 1.0f;
};
//
// input: interleaved First-Order Ambisonic source
// output: interleaved stereo mix buffer (accumulates into existing output)
// index: HRTF subject index
// qw, qx, qy, qz: normalized quaternion for orientation
// gain: gain factor for volume control
// numFrames: must be FOA_BLOCK in this version
//
void render(int16_t* input, float* output, int index, float qw, float qx, float qy, float qz, float gain, int numFrames);
private:
AudioFOA(const AudioFOA&) = delete;
AudioFOA& operator=(const AudioFOA&) = delete;
// For best cache utilization when processing thousands of instances, only
// the minimum persistent state is stored here. No coefs or work buffers.
// input history, for overlap-save
float _fftState[4][FOA_OVERLAP] = {};
// orientation history
float _rotationState[3][3] = {};
};
#endif // AudioFOA_h
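
A usage sketch for this class, mirroring the AudioClient hunk above: one block of interleaved first-order ambisonic input is rendered binaurally and accumulated into an interleaved stereo float mix. The buffer names are illustrative; numFrames must equal FOA_BLOCK in this version.

    #include <cstdint>
    #include "AudioFOA.h"

    void renderOneBlock(AudioFOA& foa,
                        int16_t* ambisonicInput,  // FOA_BLOCK frames, 4 channels interleaved
                        float* stereoMix,         // FOA_BLOCK frames, 2 channels interleaved (accumulated into)
                        float qw, float qx, float qy, float qz,  // listener-relative orientation, Z-up
                        float gain) {
        const int HRTF_DATASET_INDEX = 1;  // same subject index the mixer uses
        foa.render(ambisonicInput, stereoMix, HRTF_DATASET_INDEX,
                   qw, qx, qy, qz, gain, FOA_BLOCK);
    }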

File diff suppressed because it is too large


@@ -58,8 +58,10 @@ void AudioInjector::setOptions(const AudioInjectorOptions& options) {
// since options.stereo is computed from the audio stream,
// we need to copy it from existing options just in case.
bool currentlyStereo = _options.stereo;
bool currentlyAmbisonic = _options.ambisonic;
_options = options;
_options.stereo = currentlyStereo;
_options.ambisonic = currentlyAmbisonic;
}
void AudioInjector::finishNetworkInjection() {
@@ -134,7 +136,8 @@ bool AudioInjector::inject(bool(AudioInjectorManager::*injection)(AudioInjector*
int byteOffset = 0;
if (_options.secondOffset > 0.0f) {
byteOffset = (int)floorf(AudioConstants::SAMPLE_RATE * _options.secondOffset * (_options.stereo ? 2.0f : 1.0f));
int numChannels = _options.ambisonic ? 4 : (_options.stereo ? 2 : 1);
byteOffset = (int)(AudioConstants::SAMPLE_RATE * _options.secondOffset * numChannels);
byteOffset *= sizeof(AudioConstants::SAMPLE_SIZE);
}
_currentSendOffset = byteOffset;
@@ -169,7 +172,7 @@ bool AudioInjector::injectLocally() {
_localBuffer->setCurrentOffset(_currentSendOffset);
// call this function on the AudioClient's thread
success = QMetaObject::invokeMethod(_localAudioInterface, "outputLocalInjector", Q_ARG(bool, _options.stereo), Q_ARG(AudioInjector*, this));
success = QMetaObject::invokeMethod(_localAudioInterface, "outputLocalInjector", Q_ARG(AudioInjector*, this));
if (!success) {
qCDebug(audio) << "AudioInjector::injectLocally could not output locally via _localAudioInterface";


@@ -27,6 +27,7 @@
#include "AudioInjectorLocalBuffer.h"
#include "AudioInjectorOptions.h"
#include "AudioHRTF.h"
#include "AudioFOA.h"
#include "Sound.h"
class AbstractAudioInterface;
@@ -59,11 +60,14 @@ public:
AudioInjectorLocalBuffer* getLocalBuffer() const { return _localBuffer; }
AudioHRTF& getLocalHRTF() { return _localHRTF; }
AudioFOA& getLocalFOA() { return _localFOA; }
bool isLocalOnly() const { return _options.localOnly; }
float getVolume() const { return _options.volume; }
glm::vec3 getPosition() const { return _options.position; }
glm::quat getOrientation() const { return _options.orientation; }
bool isStereo() const { return _options.stereo; }
bool isAmbisonic() const { return _options.ambisonic; }
bool stateHas(AudioInjectorState state) const ;
static void setLocalAudioInterface(AbstractAudioInterface* audioInterface) { _localAudioInterface = audioInterface; }
@@ -113,6 +117,7 @@ private:
// when the injector is local, we need this
AudioHRTF _localHRTF;
AudioFOA _localFOA;
friend class AudioInjectorManager;
};


@@ -19,6 +19,7 @@ AudioInjectorOptions::AudioInjectorOptions() :
loop(false),
orientation(glm::vec3(0.0f, 0.0f, 0.0f)),
stereo(false),
ambisonic(false),
ignorePenumbra(false),
localOnly(false),
secondOffset(0.0)


@@ -25,6 +25,7 @@ public:
bool loop;
glm::quat orientation;
bool stereo;
bool ambisonic;
bool ignorePenumbra;
bool localOnly;
float secondOffset;


@@ -43,9 +43,10 @@ SoundScriptingInterface::SoundScriptingInterface(SharedSoundPointer sound) : _so
QObject::connect(sound.data(), &Sound::ready, this, &SoundScriptingInterface::ready);
}
Sound::Sound(const QUrl& url, bool isStereo) :
Sound::Sound(const QUrl& url, bool isStereo, bool isAmbisonic) :
Resource(url),
_isStereo(isStereo),
_isAmbisonic(isAmbisonic),
_isReady(false)
{
@@ -62,8 +63,10 @@ void Sound::downloadFinished(const QByteArray& data) {
QByteArray outputAudioByteArray;
interpretAsWav(rawAudioByteArray, outputAudioByteArray);
downSample(outputAudioByteArray);
int sampleRate = interpretAsWav(rawAudioByteArray, outputAudioByteArray);
if (sampleRate != 0) {
downSample(outputAudioByteArray, sampleRate);
}
} else if (fileName.endsWith(RAW_EXTENSION)) {
// check if this was a stereo raw file
// since it's raw the only way for us to know that is if the file was called .stereo.raw
@@ -72,8 +75,8 @@ void Sound::downloadFinished(const QByteArray& data) {
qCDebug(audio) << "Processing sound of" << rawAudioByteArray.size() << "bytes from" << getURL() << "as stereo audio file.";
}
// Process as RAW file
downSample(rawAudioByteArray);
// Process as 48khz RAW file
downSample(rawAudioByteArray, 48000);
} else {
qCDebug(audio) << "Unknown sound file type";
}
@@ -84,29 +87,80 @@ void Sound::downloadFinished(const QByteArray& data) {
emit ready();
}
void Sound::downSample(const QByteArray& rawAudioByteArray) {
// assume that this was a RAW file and is now an array of samples that are
// signed, 16-bit, 48Khz
void Sound::downSample(const QByteArray& rawAudioByteArray, int sampleRate) {
// we want to convert it to the format that the audio-mixer wants
// which is signed, 16-bit, 24Khz
int numChannels = _isStereo ? 2 : 1;
AudioSRC resampler(48000, AudioConstants::SAMPLE_RATE, numChannels);
if (sampleRate == AudioConstants::SAMPLE_RATE) {
// resize to max possible output
int numSourceFrames = rawAudioByteArray.size() / (numChannels * sizeof(AudioConstants::AudioSample));
int maxDestinationFrames = resampler.getMaxOutput(numSourceFrames);
int maxDestinationBytes = maxDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
_byteArray.resize(maxDestinationBytes);
// no resampling needed
_byteArray = rawAudioByteArray;
int numDestinationFrames = resampler.render((int16_t*)rawAudioByteArray.data(),
(int16_t*)_byteArray.data(),
numSourceFrames);
} else if (_isAmbisonic) {
// truncate to actual output
int numDestinationBytes = numDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
_byteArray.resize(numDestinationBytes);
// FIXME: add a proper Ambisonic resampler!
int numChannels = 4;
AudioSRC resampler[4] { {sampleRate, AudioConstants::SAMPLE_RATE, 1},
{sampleRate, AudioConstants::SAMPLE_RATE, 1},
{sampleRate, AudioConstants::SAMPLE_RATE, 1},
{sampleRate, AudioConstants::SAMPLE_RATE, 1} };
// resize to max possible output
int numSourceFrames = rawAudioByteArray.size() / (numChannels * sizeof(AudioConstants::AudioSample));
int maxDestinationFrames = resampler[0].getMaxOutput(numSourceFrames);
int maxDestinationBytes = maxDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
_byteArray.resize(maxDestinationBytes);
int numDestinationFrames = 0;
// iterate over channels
int16_t* srcBuffer = new int16_t[numSourceFrames];
int16_t* dstBuffer = new int16_t[maxDestinationFrames];
for (int ch = 0; ch < 4; ch++) {
int16_t* src = (int16_t*)rawAudioByteArray.data();
int16_t* dst = (int16_t*)_byteArray.data();
// deinterleave samples
for (int i = 0; i < numSourceFrames; i++) {
srcBuffer[i] = src[4*i + ch];
}
// resample one channel
numDestinationFrames = resampler[ch].render(srcBuffer, dstBuffer, numSourceFrames);
// reinterleave samples
for (int i = 0; i < numDestinationFrames; i++) {
dst[4*i + ch] = dstBuffer[i];
}
}
delete[] srcBuffer;
delete[] dstBuffer;
// truncate to actual output
int numDestinationBytes = numDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
_byteArray.resize(numDestinationBytes);
} else {
int numChannels = _isStereo ? 2 : 1;
AudioSRC resampler(sampleRate, AudioConstants::SAMPLE_RATE, numChannels);
// resize to max possible output
int numSourceFrames = rawAudioByteArray.size() / (numChannels * sizeof(AudioConstants::AudioSample));
int maxDestinationFrames = resampler.getMaxOutput(numSourceFrames);
int maxDestinationBytes = maxDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
_byteArray.resize(maxDestinationBytes);
int numDestinationFrames = resampler.render((int16_t*)rawAudioByteArray.data(),
(int16_t*)_byteArray.data(),
numSourceFrames);
// truncate to actual output
int numDestinationBytes = numDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
_byteArray.resize(numDestinationBytes);
}
}
//
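
The Ambisonic branch above works around the lack of a multichannel resampler by running one mono AudioSRC per channel: deinterleave, resample, reinterleave. A self-contained sketch of that pattern, with a placeholder callback standing in for AudioSRC::render(), whose implementation is not part of this diff:

    #include <cstdint>
    #include <functional>
    #include <vector>

    using ResampleMonoFn = std::function<int(const int16_t* in, int16_t* out, int numInFrames)>;

    // src: interleaved 4-channel input; dst: interleaved 4-channel output
    // (must hold at least maxDestinationFrames frames). Returns frames written per channel.
    int resampleInterleaved4(const int16_t* src, int numSourceFrames,
                             int16_t* dst, int maxDestinationFrames,
                             ResampleMonoFn resampleMono) {
        std::vector<int16_t> srcBuffer(numSourceFrames);
        std::vector<int16_t> dstBuffer(maxDestinationFrames);
        int numDestinationFrames = 0;
        for (int ch = 0; ch < 4; ch++) {
            // deinterleave one channel
            for (int i = 0; i < numSourceFrames; i++) {
                srcBuffer[i] = src[4 * i + ch];
            }
            // resample it in isolation
            numDestinationFrames = resampleMono(srcBuffer.data(), dstBuffer.data(), numSourceFrames);
            // reinterleave into the destination
            for (int i = 0; i < numDestinationFrames; i++) {
                dst[4 * i + ch] = dstBuffer[i];
            }
        }
        return numDestinationFrames;
    }
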
@@ -160,7 +214,8 @@ struct CombinedHeader {
WAVEHeader wave;
};
void Sound::interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray) {
// returns wavfile sample rate, used for resampling
int Sound::interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray) {
CombinedHeader fileHeader;
@@ -174,36 +229,35 @@ void Sound::interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& ou
// descriptor.id == "RIFX" also signifies BigEndian file
// waveStream.setByteOrder(QDataStream::BigEndian);
qCDebug(audio) << "Currently not supporting big-endian audio files.";
return;
return 0;
}
if (strncmp(fileHeader.riff.type, "WAVE", 4) != 0
|| strncmp(fileHeader.wave.descriptor.id, "fmt", 3) != 0) {
qCDebug(audio) << "Not a WAVE Audio file.";
return;
return 0;
}
// added the endianness check as an extra level of security
if (qFromLittleEndian<quint16>(fileHeader.wave.audioFormat) != 1) {
qCDebug(audio) << "Currently not supporting non PCM audio files.";
return;
return 0;
}
if (qFromLittleEndian<quint16>(fileHeader.wave.numChannels) == 2) {
_isStereo = true;
} else if (qFromLittleEndian<quint16>(fileHeader.wave.numChannels) > 2) {
qCDebug(audio) << "Currently not support audio files with more than 2 channels.";
} else if (qFromLittleEndian<quint16>(fileHeader.wave.numChannels) == 4) {
_isAmbisonic = true;
} else if (qFromLittleEndian<quint16>(fileHeader.wave.numChannels) != 1) {
qCDebug(audio) << "Currently not support audio files with other than 1/2/4 channels.";
return 0;
}
if (qFromLittleEndian<quint16>(fileHeader.wave.bitsPerSample) != 16) {
qCDebug(audio) << "Currently not supporting non 16bit audio files.";
return;
return 0;
}
if (qFromLittleEndian<quint32>(fileHeader.wave.sampleRate) != 48000) {
qCDebug(audio) << "Currently not supporting non 48KHz audio files.";
return;
}
// Skip any extra data in the WAVE chunk
waveStream.skipRawData(fileHeader.wave.descriptor.size - (sizeof(WAVEHeader) - sizeof(chunk)));
@@ -218,7 +272,7 @@ void Sound::interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& ou
waveStream.skipRawData(dataHeader.descriptor.size);
} else {
qCDebug(audio) << "Could not read wav audio data header.";
return;
return 0;
}
}
@@ -227,12 +281,14 @@ void Sound::interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& ou
outputAudioByteArray.resize(outputAudioByteArraySize);
if (waveStream.readRawData(outputAudioByteArray.data(), outputAudioByteArraySize) != (int)outputAudioByteArraySize) {
qCDebug(audio) << "Error reading WAV file";
return 0;
}
_duration = (float) (outputAudioByteArraySize / (fileHeader.wave.sampleRate * fileHeader.wave.numChannels * fileHeader.wave.bitsPerSample / 8.0f));
return fileHeader.wave.sampleRate;
} else {
qCDebug(audio) << "Could not read wav audio file header.";
return;
return 0;
}
}
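
For reference, the _duration formula near the end of interpretAsWav() divides the data-chunk size by the byte rate. A worked example with illustrative numbers: a 4-channel, 16-bit, 48 kHz data chunk of 3,840,000 bytes gives 3,840,000 / (48000 * 4 * 16/8) = 10.0 seconds.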


@@ -22,9 +22,10 @@ class Sound : public Resource {
Q_OBJECT
public:
Sound(const QUrl& url, bool isStereo = false);
Sound(const QUrl& url, bool isStereo = false, bool isAmbisonic = false);
bool isStereo() const { return _isStereo; }
bool isAmbisonic() const { return _isAmbisonic; }
bool isReady() const { return _isReady; }
float getDuration() const { return _duration; }
@@ -37,11 +38,12 @@ signals:
private:
QByteArray _byteArray;
bool _isStereo;
bool _isAmbisonic;
bool _isReady;
float _duration; // In seconds
void downSample(const QByteArray& rawAudioByteArray);
void interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray);
void downSample(const QByteArray& rawAudioByteArray, int sampleRate);
int interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray);
virtual void downloadFinished(const QByteArray& data) override;
};

File diff suppressed because it is too large


@@ -45,6 +45,9 @@ ScriptAudioInjector* AudioScriptingInterface::playSound(SharedSoundPointer sound
// stereo option isn't set from script, this comes from sound metadata or filename
AudioInjectorOptions optionsCopy = injectorOptions;
optionsCopy.stereo = sound->isStereo();
optionsCopy.ambisonic = sound->isAmbisonic();
optionsCopy.localOnly = optionsCopy.localOnly || sound->isAmbisonic(); // force localOnly when Ambisonic
auto injector = AudioInjector::playSound(sound->getByteArray(), optionsCopy);
if (!injector) {
return NULL;
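
A hypothetical check, not in this commit, illustrating the forced localOnly above: even when a script requests networked playback, an ambisonic sound is injected locally only (through the AudioFOA path).

    #include <cassert>
    #include "AudioInjectorOptions.h"

    void ambisonicForcesLocalOnly() {
        AudioInjectorOptions injectorOptions;            // script-supplied options
        injectorOptions.localOnly = false;               // script asks for networked playback

        AudioInjectorOptions optionsCopy = injectorOptions;
        optionsCopy.ambisonic = true;                    // sound->isAmbisonic() == true
        optionsCopy.localOnly = optionsCopy.localOnly || optionsCopy.ambisonic;

        assert(optionsCopy.localOnly);                   // stays local-only regardless of the request
    }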