out of line inline members for coding standard

This commit is contained in:
Craig Hansen-Sturm 2014-09-20 05:46:27 -07:00
parent 90379ee7eb
commit 1b2ee4023a
6 changed files with 456 additions and 374 deletions

View file

@ -26,186 +26,216 @@ protected:
T** _frameBuffer;
void allocateFrames() {
_frameBuffer = new T*[_channelCountMax];
if (_frameBuffer) {
for (uint32_t i = 0; i < _channelCountMax; ++i) {
_frameBuffer[i] = new T[_frameCountMax];
}
}
}
void deallocateFrames() {
if (_frameBuffer) {
for (uint32_t i = 0; i < _channelCountMax; ++i) {
delete _frameBuffer[i];
}
delete _frameBuffer;
}
_frameBuffer = NULL;
}
void allocateFrames();
void deallocateFrames();
public:
AudioFrameBuffer() :
_channelCount(0),
_frameCount(0),
_frameCountMax(0),
_frameBuffer(NULL) {
}
AudioFrameBuffer();
AudioFrameBuffer(const uint32_t channelCount, const uint32_t frameCount);
virtual ~AudioFrameBuffer();
AudioFrameBuffer(const uint32_t channelCount, const uint32_t frameCount) :
_channelCount(channelCount),
_channelCountMax(channelCount),
_frameCount(frameCount),
_frameCountMax(frameCount),
_frameBuffer(NULL) {
allocateFrames();
void initialize(const uint32_t channelCount, const uint32_t frameCount);
void finalize();
T**& getFrameData();
uint32_t getChannelCount();
uint32_t getFrameCount();
template< typename S >
void copyFrames(uint32_t channelCount, const uint32_t frameCount, S* frames, const bool copyOut = false);
void zeroFrames();
};
template< typename T >
// Default constructor: creates an empty buffer with no storage allocated.
// Fix: _channelCountMax was missing from the initializer list, leaving it
// uninitialized; allocateFrames()/deallocateFrames() iterate over it, so a
// later deallocateFrames() on a default-constructed buffer read garbage.
AudioFrameBuffer< T >::AudioFrameBuffer() :
    _channelCount(0),
    _channelCountMax(0),
    _frameCount(0),
    _frameCountMax(0),
    _frameBuffer(NULL) {
}
template< typename T >
// Sizing constructor: records both the current and maximum channel/frame
// counts, then allocates the per-channel sample arrays via allocateFrames().
// @param channelCount  number of channels to allocate
// @param frameCount    number of frames (samples per channel) to allocate
AudioFrameBuffer< T >::AudioFrameBuffer(const uint32_t channelCount, const uint32_t frameCount) :
    _channelCount(channelCount),
    _channelCountMax(channelCount),
    _frameCount(frameCount),
    _frameCountMax(frameCount),
    _frameBuffer(NULL) {
    allocateFrames();
}
template< typename T >
// Destructor: finalize() releases the frame storage and zeroes the counters.
AudioFrameBuffer< T >::~AudioFrameBuffer() {
    finalize();
}
template< typename T >
// Allocates _channelCountMax per-channel arrays of _frameCountMax samples.
// Fix: the function's closing brace was clobbered by a stale inline-destructor
// fragment left over from the refactor; restored here. Note the null check on
// the result of new is retained for minimal change, but plain new throws
// std::bad_alloc on failure rather than returning NULL, so the check never
// fires on conforming compilers.
void AudioFrameBuffer< T >::allocateFrames() {
    _frameBuffer = new T*[_channelCountMax];
    if (_frameBuffer) {
        for (uint32_t i = 0; i < _channelCountMax; ++i) {
            _frameBuffer[i] = new T[_frameCountMax];
        }
    }
}
template< typename T >
// Releases the per-channel sample arrays and the channel-pointer array, then
// nulls _frameBuffer so repeated calls are safe.
// Fix: the storage is created with new[] in allocateFrames(), so it must be
// released with delete[]; the original used scalar delete, which is undefined
// behavior for array allocations.
void AudioFrameBuffer< T >::deallocateFrames() {
    if (_frameBuffer) {
        for (uint32_t i = 0; i < _channelCountMax; ++i) {
            delete[] _frameBuffer[i];
        }
        delete[] _frameBuffer;
    }
    _frameBuffer = NULL;
}
template< typename T >
// (Re)sizes the buffer: releases any existing storage, records the new
// channel/frame geometry, and allocates fresh per-channel arrays.
void AudioFrameBuffer< T >::initialize(const uint32_t channelCount, const uint32_t frameCount) {
    // Drop previously allocated storage before adopting the new geometry.
    if (_frameBuffer) {
        finalize();
    }
    _channelCountMax = channelCount;
    _channelCount = channelCount;
    _frameCountMax = frameCount;
    _frameCount = frameCount;
    allocateFrames();
}
template< typename T >
// Returns the buffer to the empty state: frees all sample storage and resets
// every count (current and maximum) to zero.
void AudioFrameBuffer< T >::finalize() {
    deallocateFrames();
    _frameCountMax = 0;
    _frameCount = 0;
    _channelCountMax = 0;
    _channelCount = 0;
}
template< typename T >
// Exposes the raw channel-pointer array (_frameBuffer) by reference; callers
// index it as data[channel][frame]. NULL until storage has been allocated.
inline T**& AudioFrameBuffer< T >::getFrameData() {
    return _frameBuffer;
}
template< typename T >
// Returns the current (not maximum) channel count.
inline uint32_t AudioFrameBuffer< T >::getChannelCount() {
    return _channelCount;
}
template< typename T >
// Returns the current (not maximum) frame count.
inline uint32_t AudioFrameBuffer< T >::getFrameCount() {
    return _frameCount;
}
template< typename T >
// Fills every allocated channel with zeros. Note it clears the full allocated
// extent (_channelCountMax x _frameCountMax), not just the current counts.
inline void AudioFrameBuffer< T >::zeroFrames() {
    // No storage allocated yet: nothing to clear.
    if (!_frameBuffer) {
        return;
    }
    for (uint32_t channel = 0; channel < _channelCountMax; ++channel) {
        memset(_frameBuffer[channel], 0, sizeof(T) * _frameCountMax);
    }
}
template< typename T >
template< typename S >
inline void AudioFrameBuffer< T >::copyFrames(uint32_t channelCount, const uint32_t frameCount, S* frames, const bool copyOut) {
if ( !_frameBuffer || !frames) {
return;
}
void initialize(const uint32_t channelCount, const uint32_t frameCount) {
if (_frameBuffer) {
finalize();
}
_channelCount = channelCount;
_channelCountMax = channelCount;
if (channelCount <=_channelCountMax && frameCount <=_frameCountMax) {
// We always allow copying fewer frames than we have allocated
_frameCount = frameCount;
_frameCountMax = frameCount;
allocateFrames();
_channelCount = channelCount;
}
else {
//
// However we do not attempt to copy more frames than we've allocated ;-) This is a framing error caused by either
// a/ the platform audio driver not correctly queuing and regularly smoothing device IO capture frames -or-
// b/ our IO processing thread (currently running on a Qt GUI thread) has been delayed/scheduled too late.
//
// The fix is not to make the problem worse by allocating additional frames on this thread, rather, it is to handle
// dynamic re-sizing off the IO processing thread. While a/ is not in our control, we will address the off thread
// re-sizing,, as well as b/, in later releases.
//
// For now, we log this condition, and do our best to recover by copying as many frames as we have allocated.
// Unfortunately, this will result (temporarily), in an audible discontinuity.
//
// If you repeatedly receive this error, contact craig@highfidelity.io and send me what audio device you are using,
// what audio-stack you are using (pulse/alsa, core audio, ...), what OS, and what the reported frame/channel
// counts are. In addition, any information about what you were doing at the time of the discontinuity, would be
// useful (e.g., accessing any client features/menus)
//
qDebug() << "Audio framing error: _channelCount="
<< _channelCount
<< "channelCountMax="
<< _channelCountMax
<< "_frameCount="
<< _frameCount
<< "frameCountMax="
<< _frameCountMax;
_channelCount = std::min(_channelCount,_channelCountMax);
_frameCount = std::min(_frameCount,_frameCountMax);
}
void finalize() {
deallocateFrames();
_channelCount = 0;
_channelCountMax = 0;
_frameCount = 0;
_frameCountMax = 0;
}
T**& getFrameData() {
return _frameBuffer;
}
uint32_t getChannelCount() {
return _channelCount;
}
uint32_t getFrameCount() {
return _frameCount;
}
void zeroFrames() {
if (!_frameBuffer) {
return;
}
for (uint32_t i = 0; i < _channelCountMax; ++i) {
memset(_frameBuffer[i], 0, sizeof(T)*_frameCountMax);
}
}
template< typename S >
void copyFrames(uint32_t channelCount, const uint32_t frameCount, S* frames, const bool copyOut = false) {
if ( !_frameBuffer || !frames) {
return;
}
if (channelCount <=_channelCountMax && frameCount <=_frameCountMax) {
// We always allow copying fewer frames than we have allocated
_frameCount = frameCount;
_channelCount = channelCount;
if (copyOut) {
S* dst = frames;
if(typeid(T) == typeid(S)) { // source and destination types are the same
for (uint32_t i = 0; i < _frameCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
*dst++ = _frameBuffer[j][i];
}
}
}
else {
//
// However we do not attempt to copy more frames than we've allocated ;-) This is a framing error caused by either
// a/ the platform audio driver not correctly queuing and regularly smoothing device IO capture frames -or-
// b/ our IO processing thread (currently running on a Qt GUI thread) has been delayed/scheduled too late.
//
// The fix is not to make the problem worse by allocating additional frames on this thread, rather, it is to handle
// dynamic re-sizing off the IO processing thread. While a/ is not in our control, we will address the off thread
// re-sizing,, as well as b/, in later releases.
//
// For now, we log this condition, and do our best to recover by copying as many frames as we have allocated.
// Unfortunately, this will result (temporarily), in an audible discontinuity.
//
// If you repeatedly receive this error, contact craig@highfidelity.io and send me what audio device you are using,
// what audio-stack you are using (pulse/alsa, core audio, ...), what OS, and what the reported frame/channel
// counts are. In addition, any information about what you were doing at the time of the discontinuity, would be
// useful (e.g., accessing any client features/menus)
//
qDebug() << "Audio framing error: _channelCount="
<< _channelCount
<< "channelCountMax="
<< _channelCountMax
<< "_frameCount="
<< _frameCount
<< "frameCountMax="
<< _frameCountMax;
_channelCount = std::min(_channelCount,_channelCountMax);
_frameCount = std::min(_frameCount,_frameCountMax);
}
if (copyOut) {
S* dst = frames;
if(typeid(T) == typeid(S)) { // source and destination types are the same
if(typeid(T) == typeid(float32_t) &&
typeid(S) == typeid(int16_t)) {
const int scale = (2 << ((8 * sizeof(S)) - 1));
for (uint32_t i = 0; i < _frameCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
*dst++ = _frameBuffer[j][i];
*dst++ = (S)(_frameBuffer[j][i] * scale);
}
}
}
else {
if(typeid(T) == typeid(float32_t) &&
typeid(S) == typeid(int16_t)) {
const int scale = (2 << ((8 * sizeof(S)) - 1));
for (uint32_t i = 0; i < _frameCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
*dst++ = (S)(_frameBuffer[j][i] * scale);
}
}
}
else {
assert(0); // currently unsupported conversion
}
}
}
else { // copyIn
S* src = frames;
if(typeid(T) == typeid(S)) { // source and destination types are the same
for (uint32_t i = 0; i < _frameCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
_frameBuffer[j][i] = *src++;
}
}
}
else {
if(typeid(T) == typeid(float32_t) &&
typeid(S) == typeid(int16_t)) {
const int scale = (2 << ((8 * sizeof(S)) - 1));
for (uint32_t i = 0; i < _frameCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
_frameBuffer[j][i] = ((T)(*src++)) / scale;
}
}
}
else {
assert(0); // currently unsupported conversion
}
assert(0); // currently unsupported conversion
}
}
}
};
else { // copyIn
S* src = frames;
if(typeid(T) == typeid(S)) { // source and destination types are the same
for (uint32_t i = 0; i < _frameCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
_frameBuffer[j][i] = *src++;
}
}
}
else {
if(typeid(T) == typeid(float32_t) &&
typeid(S) == typeid(int16_t)) {
const int scale = (2 << ((8 * sizeof(S)) - 1));
for (uint32_t i = 0; i < _frameCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
_frameBuffer[j][i] = ((T)(*src++)) / scale;
}
}
}
else {
assert(0); // currently unsupported conversion
}
}
}
}
typedef AudioFrameBuffer< float32_t > AudioBufferFloat32;
typedef AudioFrameBuffer< int32_t > AudioBufferSInt32;

View file

@ -13,21 +13,13 @@
#define hifi_AudioEditBuffer_h
template< typename T >
class AudioEditBuffer
: public AudioFrameBuffer<T> {
class AudioEditBuffer : public AudioFrameBuffer<T> {
public:
AudioEditBuffer() :
AudioFrameBuffer<T>() {
}
AudioEditBuffer(const uint32_t channelCount, const uint32_t frameCount) :
AudioFrameBuffer<T>(channelCount, frameCount) {
}
~AudioEditBuffer() {
}
AudioEditBuffer();
AudioEditBuffer(const uint32_t channelCount, const uint32_t frameCount);
~AudioEditBuffer();
bool getZeroCrossing(uint32_t start, bool direction, float32_t epsilon, uint32_t& zero);
@ -36,7 +28,21 @@ public:
};
template< typename T >
bool AudioEditBuffer<T>::getZeroCrossing(uint32_t start, bool direction, float32_t epsilon, uint32_t& zero) {
AudioEditBuffer<T>::AudioEditBuffer() :
AudioFrameBuffer<T>() {
}
template< typename T >
// Sizing constructor: forwards the geometry to the AudioFrameBuffer base,
// which allocates channelCount x frameCount sample storage.
AudioEditBuffer<T>::AudioEditBuffer(const uint32_t channelCount, const uint32_t frameCount) :
    AudioFrameBuffer<T>(channelCount, frameCount) {
}
template< typename T >
// Destructor: no resources of its own; base class releases the frame storage.
AudioEditBuffer<T>::~AudioEditBuffer() {
}
template< typename T >
inline bool AudioEditBuffer<T>::getZeroCrossing(uint32_t start, bool direction, float32_t epsilon, uint32_t& zero) {
zero = this->_frameCount;
@ -69,7 +75,7 @@ bool AudioEditBuffer<T>::getZeroCrossing(uint32_t start, bool direction, float32
}
template< typename T >
void AudioEditBuffer<T>::linearFade(uint32_t start, uint32_t stop, bool slope) {
inline void AudioEditBuffer<T>::linearFade(uint32_t start, uint32_t stop, bool slope) {
if (start >= stop || start > this->_frameCount || stop > this->_frameCount ) {
return;
@ -97,7 +103,7 @@ void AudioEditBuffer<T>::linearFade(uint32_t start, uint32_t stop, bool slope) {
}
template< typename T >
void AudioEditBuffer<T>::exponentialFade(uint32_t start, uint32_t stop, bool slope) {
inline void AudioEditBuffer<T>::exponentialFade(uint32_t start, uint32_t stop, bool slope) {
// TBD
}

View file

@ -0,0 +1,48 @@
//
// AudioGain.cpp
// hifi
//
// Created by Craig Hansen-Sturm on 9/10/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <assert.h>
#include <math.h>
#include <SharedUtil.h>
#include "AudioRingBuffer.h"
#include "AudioFormat.h"
#include "AudioBuffer.h"
#include "AudioGain.h"
// Constructor: delegates to initialize(), which sets unity gain and unmuted.
AudioGain::AudioGain() {
    initialize();
}
// Destructor: delegates to finalize() (currently a no-op).
AudioGain::~AudioGain() {
    finalize();
}
// Sets the default state: gain = 1.0 (unity), mute = 0.0 (unmuted).
void AudioGain::initialize() {
    setParameters(1.0f,0.0f);
}
// No owned resources to release; kept for symmetry with initialize().
void AudioGain::finalize() {
}
// Restores the default parameters (unity gain, unmuted).
void AudioGain::reset() {
    initialize();
}
// Updates the gain/mute state.
// @param gain  desired linear gain; clamped into [0.0, 1.0]
// @param mute  any non-zero value engages the mute flag
void AudioGain::setParameters(const float gain, const float mute) {
    float clampedGain = gain;
    if (clampedGain < 0.0f) {
        clampedGain = 0.0f;
    } else if (clampedGain > 1.0f) {
        clampedGain = 1.0f;
    }
    _gain = clampedGain;
    _mute = (mute != 0.0f);
}
// Reads back the current gain and mute state; mute is reported as 1.0/0.0.
void AudioGain::getParameters(float& gain, float& mute) {
    gain = _gain;
    mute = _mute ? 1.0f : 0.0f;
}

View file

@ -18,119 +18,102 @@ class AudioGain
bool _mute;
public:
AudioGain() {
initialize();
}
AudioGain();
~AudioGain();
~AudioGain() {
finalize();
}
void initialize();
void finalize();
void reset();
void initialize() {
setParameters(1.0f,0.0f);
}
void setParameters(const float gain, const float mute);
void getParameters(float& gain, float& mute);
void finalize() {
}
void render(AudioBufferFloat32& frameBuffer);
};
inline void AudioGain::render(AudioBufferFloat32& frameBuffer) {
if (_mute) {
frameBuffer.zeroFrames();
return;
}
void reset() {
initialize();
}
float32_t** samples = frameBuffer.getFrameData();
void setParameters(const float gain, const float mute) {
_gain = std::min(std::max(gain, 0.0f), 1.0f);
_mute = mute != 0.0f;
bool frameAlignment16 = (frameBuffer.getFrameCount() & 0x0F) == 0;
if (frameAlignment16) {
}
void getParameters(float& gain, float& mute) {
gain = _gain;
mute = _mute ? 1.0f : 0.0f;
}
void render(AudioBufferFloat32& frameBuffer) {
if (_mute) {
frameBuffer.zeroFrames();
return;
}
float32_t** samples = frameBuffer.getFrameData();
bool frameAlignment16 = (frameBuffer.getFrameCount() & 0x0F) == 0;
if (frameAlignment16) {
if (frameBuffer.getChannelCount() == 1) {
if (frameBuffer.getChannelCount() == 1) {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 16) {
samples[0][i + 0] *= _gain;
samples[0][i + 1] *= _gain;
samples[0][i + 2] *= _gain;
samples[0][i + 3] *= _gain;
samples[0][i + 4] *= _gain;
samples[0][i + 5] *= _gain;
samples[0][i + 6] *= _gain;
samples[0][i + 7] *= _gain;
samples[0][i + 8] *= _gain;
samples[0][i + 9] *= _gain;
samples[0][i + 10] *= _gain;
samples[0][i + 11] *= _gain;
samples[0][i + 12] *= _gain;
samples[0][i + 13] *= _gain;
samples[0][i + 14] *= _gain;
samples[0][i + 15] *= _gain;
}
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 16) {
samples[0][i + 0] *= _gain;
samples[0][i + 1] *= _gain;
samples[0][i + 2] *= _gain;
samples[0][i + 3] *= _gain;
samples[0][i + 4] *= _gain;
samples[0][i + 5] *= _gain;
samples[0][i + 6] *= _gain;
samples[0][i + 7] *= _gain;
samples[0][i + 8] *= _gain;
samples[0][i + 9] *= _gain;
samples[0][i + 10] *= _gain;
samples[0][i + 11] *= _gain;
samples[0][i + 12] *= _gain;
samples[0][i + 13] *= _gain;
samples[0][i + 14] *= _gain;
samples[0][i + 15] *= _gain;
}
else if (frameBuffer.getChannelCount() == 2) {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 16) {
samples[0][i + 0] *= _gain;
samples[0][i + 1] *= _gain;
samples[0][i + 2] *= _gain;
samples[0][i + 3] *= _gain;
samples[0][i + 4] *= _gain;
samples[0][i + 5] *= _gain;
samples[0][i + 6] *= _gain;
samples[0][i + 7] *= _gain;
samples[0][i + 8] *= _gain;
samples[0][i + 9] *= _gain;
samples[0][i + 10] *= _gain;
samples[0][i + 11] *= _gain;
samples[0][i + 12] *= _gain;
samples[0][i + 13] *= _gain;
samples[0][i + 14] *= _gain;
samples[0][i + 15] *= _gain;
samples[1][i + 0] *= _gain;
samples[1][i + 1] *= _gain;
samples[1][i + 2] *= _gain;
samples[1][i + 3] *= _gain;
samples[1][i + 4] *= _gain;
samples[1][i + 5] *= _gain;
samples[1][i + 6] *= _gain;
samples[1][i + 7] *= _gain;
samples[1][i + 8] *= _gain;
samples[1][i + 9] *= _gain;
samples[1][i + 10] *= _gain;
samples[1][i + 11] *= _gain;
samples[1][i + 12] *= _gain;
samples[1][i + 13] *= _gain;
samples[1][i + 14] *= _gain;
samples[1][i + 15] *= _gain;
}
}
else {
assert("unsupported channel format");
}
else if (frameBuffer.getChannelCount() == 2) {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 16) {
samples[0][i + 0] *= _gain;
samples[0][i + 1] *= _gain;
samples[0][i + 2] *= _gain;
samples[0][i + 3] *= _gain;
samples[0][i + 4] *= _gain;
samples[0][i + 5] *= _gain;
samples[0][i + 6] *= _gain;
samples[0][i + 7] *= _gain;
samples[0][i + 8] *= _gain;
samples[0][i + 9] *= _gain;
samples[0][i + 10] *= _gain;
samples[0][i + 11] *= _gain;
samples[0][i + 12] *= _gain;
samples[0][i + 13] *= _gain;
samples[0][i + 14] *= _gain;
samples[0][i + 15] *= _gain;
samples[1][i + 0] *= _gain;
samples[1][i + 1] *= _gain;
samples[1][i + 2] *= _gain;
samples[1][i + 3] *= _gain;
samples[1][i + 4] *= _gain;
samples[1][i + 5] *= _gain;
samples[1][i + 6] *= _gain;
samples[1][i + 7] *= _gain;
samples[1][i + 8] *= _gain;
samples[1][i + 9] *= _gain;
samples[1][i + 10] *= _gain;
samples[1][i + 11] *= _gain;
samples[1][i + 12] *= _gain;
samples[1][i + 13] *= _gain;
samples[1][i + 14] *= _gain;
samples[1][i + 15] *= _gain;
}
}
else {
for (uint32_t j = 0; j < frameBuffer.getChannelCount(); ++j) {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 1) {
samples[j][i] *= _gain;
}
assert("unsupported channel format");
}
}
else {
for (uint32_t j = 0; j < frameBuffer.getChannelCount(); ++j) {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 1) {
samples[j][i] *= _gain;
}
}
}
};
}
#endif // AudioGain_h

View file

@ -1,8 +1,8 @@
//
// AudioSourceTone.cpp
// AudioPan.cpp
// hifi
//
// Created by Craig Hansen-Sturm on 8/10/14.
// Created by Craig Hansen-Sturm on 9/10/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
@ -21,3 +21,32 @@ float32_t AudioPan::ONE_MINUS_EPSILON = 1.0f - EPSILON;
float32_t AudioPan::ZERO_PLUS_EPSILON = 0.0f + EPSILON;
float32_t AudioPan::ONE_HALF_MINUS_EPSILON = 0.5f - EPSILON;
float32_t AudioPan::ONE_HALF_PLUS_EPSILON = 0.5f + EPSILON;
// Constructor: delegates to initialize(), which centers the pan at 0.5.
AudioPan::AudioPan() {
    initialize();
}
// Destructor: delegates to finalize() (currently a no-op).
AudioPan::~AudioPan() {
    finalize();
}
// Sets the default state: pan = 0.5 (center).
void AudioPan::initialize() {
    setParameters(0.5f);
}
// No owned resources to release; kept for symmetry with initialize().
void AudioPan::finalize() {
}
// Restores the default parameters (centered pan).
void AudioPan::reset() {
    initialize();
}
// Updates the pan position and recomputes the channel gain coefficients.
// @param pan  position in [0.0, 1.0]; values outside the range are clamped
void AudioPan::setParameters(const float32_t pan) {
    // pan ranges between 0.0 and 1.0f inclusive. 0.5f is midpoint between full left and full right
    _pan = std::min(std::max(pan, 0.0f), 1.0f);
    updateCoefficients();
}
// Reads back the current pan position.
void AudioPan::getParameters(float32_t& pan) {
    pan = _pan;
}

View file

@ -23,118 +23,104 @@ class AudioPan
static float32_t ONE_HALF_MINUS_EPSILON;
static float32_t ONE_HALF_PLUS_EPSILON;
void updateCoefficients() {
// implement constant power sin^2 + cos^2 = 1 panning law
if (_pan >= ONE_MINUS_EPSILON) { // full right
_gainLeft = 0.0f;
_gainRight = 1.0f;
}
else if (_pan <= ZERO_PLUS_EPSILON) { // full left
_gainLeft = 1.0f;
_gainRight = 0.0f;
}
else if ((_pan >= ONE_HALF_MINUS_EPSILON) && (_pan <= ONE_HALF_PLUS_EPSILON)) { // center
_gainLeft = 1.0f / SQUARE_ROOT_OF_2;
_gainRight = 1.0f / SQUARE_ROOT_OF_2;
}
else { // intermediate cases
_gainLeft = cosf( TWO_PI * _pan );
_gainRight = sinf( TWO_PI * _pan );
}
}
void updateCoefficients();
public:
AudioPan() {
initialize();
AudioPan();
~AudioPan();
void initialize();
void finalize();
void reset();
void setParameters(const float32_t pan);
void getParameters(float32_t& pan);
void render(AudioBufferFloat32& frameBuffer);
};
inline void AudioPan::render(AudioBufferFloat32& frameBuffer) {
if (frameBuffer.getChannelCount() != 2) {
return;
}
~AudioPan() {
finalize();
}
float32_t** samples = frameBuffer.getFrameData();
void initialize() {
setParameters(0.5f);
}
void finalize() {
}
void reset() {
initialize();
}
void setParameters(const float32_t pan) {
// pan ranges between 0.0 and 1.0f inclusive. 0.5f is midpoint between full left and full right
_pan = std::min(std::max(pan, 0.0f), 1.0f);
updateCoefficients();
}
void getParameters(float32_t& pan) {
pan = _pan;
}
void render(AudioBufferFloat32& frameBuffer) {
bool frameAlignment16 = (frameBuffer.getFrameCount() & 0x0F) == 0;
if (frameAlignment16) {
if (frameBuffer.getChannelCount() != 2) {
return;
}
float32_t** samples = frameBuffer.getFrameData();
bool frameAlignment16 = (frameBuffer.getFrameCount() & 0x0F) == 0;
if (frameAlignment16) {
if (frameBuffer.getChannelCount() == 2) {
if (frameBuffer.getChannelCount() == 2) {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 16) {
samples[0][i + 0] *= _gainLeft;
samples[0][i + 1] *= _gainLeft;
samples[0][i + 2] *= _gainLeft;
samples[0][i + 3] *= _gainLeft;
samples[0][i + 4] *= _gainLeft;
samples[0][i + 5] *= _gainLeft;
samples[0][i + 6] *= _gainLeft;
samples[0][i + 7] *= _gainLeft;
samples[0][i + 8] *= _gainLeft;
samples[0][i + 9] *= _gainLeft;
samples[0][i + 10] *= _gainLeft;
samples[0][i + 11] *= _gainLeft;
samples[0][i + 12] *= _gainLeft;
samples[0][i + 13] *= _gainLeft;
samples[0][i + 14] *= _gainLeft;
samples[0][i + 15] *= _gainLeft;
samples[1][i + 0] *= _gainRight;
samples[1][i + 1] *= _gainRight;
samples[1][i + 2] *= _gainRight;
samples[1][i + 3] *= _gainRight;
samples[1][i + 4] *= _gainRight;
samples[1][i + 5] *= _gainRight;
samples[1][i + 6] *= _gainRight;
samples[1][i + 7] *= _gainRight;
samples[1][i + 8] *= _gainRight;
samples[1][i + 9] *= _gainRight;
samples[1][i + 10] *= _gainRight;
samples[1][i + 11] *= _gainRight;
samples[1][i + 12] *= _gainRight;
samples[1][i + 13] *= _gainRight;
samples[1][i + 14] *= _gainRight;
samples[1][i + 15] *= _gainRight;
}
}
else {
assert("unsupported channel format");
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 16) {
samples[0][i + 0] *= _gainLeft;
samples[0][i + 1] *= _gainLeft;
samples[0][i + 2] *= _gainLeft;
samples[0][i + 3] *= _gainLeft;
samples[0][i + 4] *= _gainLeft;
samples[0][i + 5] *= _gainLeft;
samples[0][i + 6] *= _gainLeft;
samples[0][i + 7] *= _gainLeft;
samples[0][i + 8] *= _gainLeft;
samples[0][i + 9] *= _gainLeft;
samples[0][i + 10] *= _gainLeft;
samples[0][i + 11] *= _gainLeft;
samples[0][i + 12] *= _gainLeft;
samples[0][i + 13] *= _gainLeft;
samples[0][i + 14] *= _gainLeft;
samples[0][i + 15] *= _gainLeft;
samples[1][i + 0] *= _gainRight;
samples[1][i + 1] *= _gainRight;
samples[1][i + 2] *= _gainRight;
samples[1][i + 3] *= _gainRight;
samples[1][i + 4] *= _gainRight;
samples[1][i + 5] *= _gainRight;
samples[1][i + 6] *= _gainRight;
samples[1][i + 7] *= _gainRight;
samples[1][i + 8] *= _gainRight;
samples[1][i + 9] *= _gainRight;
samples[1][i + 10] *= _gainRight;
samples[1][i + 11] *= _gainRight;
samples[1][i + 12] *= _gainRight;
samples[1][i + 13] *= _gainRight;
samples[1][i + 14] *= _gainRight;
samples[1][i + 15] *= _gainRight;
}
}
else {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 1) {
samples[0][i] *= _gainLeft;
samples[1][i] *= _gainRight;
}
assert("unsupported channel format");
}
}
};
else {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 1) {
samples[0][i] *= _gainLeft;
samples[1][i] *= _gainRight;
}
}
}
// Derives _gainLeft/_gainRight from _pan, snapping the three special
// positions (hard left, hard right, center) to exact values via the
// epsilon guard constants.
inline void AudioPan::updateCoefficients() {
    // implement constant power sin^2 + cos^2 = 1 panning law
    if (_pan >= ONE_MINUS_EPSILON) { // full right
        _gainLeft = 0.0f;
        _gainRight = 1.0f;
    }
    else if (_pan <= ZERO_PLUS_EPSILON) { // full left
        _gainLeft = 1.0f;
        _gainRight = 0.0f;
    }
    else if ((_pan >= ONE_HALF_MINUS_EPSILON) && (_pan <= ONE_HALF_PLUS_EPSILON)) { // center
        _gainLeft = 1.0f / SQUARE_ROOT_OF_2;
        _gainRight = 1.0f / SQUARE_ROOT_OF_2;
    }
    else { // intermediate cases
        // NOTE(review): a constant-power law over pan in [0,1] normally maps
        // through an angle of (PI/2)*pan; scaling by TWO_PI here would produce
        // negative gains for mid-range pan values unless TWO_PI is defined as
        // something other than 2*pi elsewhere — confirm against its definition.
        _gainLeft = cosf( TWO_PI * _pan );
        _gainRight = sinf( TWO_PI * _pan );
    }
}
#endif // AudioPan_h